Line data Source code
1 : //!
2 : //! This provides an abstraction to store PostgreSQL relations and other files
3 : //! in the key-value store that implements the Repository interface.
4 : //!
5 : //! (TODO: The line between PUT-functions here and walingest.rs is a bit blurry, as
6 : //! walingest.rs handles a few things like implicit relation creation and extension.
7 : //! Clarify that)
8 : //!
9 : use super::tenant::{PageReconstructError, Timeline};
10 : use crate::context::RequestContext;
11 : use crate::keyspace::{KeySpace, KeySpaceAccum};
12 : use crate::span::debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id;
13 : use crate::walrecord::NeonWalRecord;
14 : use crate::{aux_file, repository::*};
15 : use anyhow::{ensure, Context};
16 : use bytes::{Buf, Bytes, BytesMut};
17 : use enum_map::Enum;
18 : use pageserver_api::key::{
19 : dbdir_key_range, rel_block_to_key, rel_dir_to_key, rel_key_range, rel_size_to_key,
20 : relmap_file_key, repl_origin_key, repl_origin_key_range, slru_block_to_key, slru_dir_to_key,
21 : slru_segment_key_range, slru_segment_size_to_key, twophase_file_key, twophase_key_range,
22 : CompactKey, AUX_FILES_KEY, CHECKPOINT_KEY, CONTROLFILE_KEY, DBDIR_KEY, TWOPHASEDIR_KEY,
23 : };
24 : use pageserver_api::keyspace::SparseKeySpace;
25 : use pageserver_api::models::AuxFilePolicy;
26 : use pageserver_api::reltag::{BlockNumber, RelTag, SlruKind};
27 : use postgres_ffi::relfile_utils::{FSM_FORKNUM, VISIBILITYMAP_FORKNUM};
28 : use postgres_ffi::BLCKSZ;
29 : use postgres_ffi::{Oid, RepOriginId, TimestampTz, TransactionId};
30 : use serde::{Deserialize, Serialize};
31 : use std::collections::{hash_map, HashMap, HashSet};
32 : use std::ops::ControlFlow;
33 : use std::ops::Range;
34 : use strum::IntoEnumIterator;
35 : use tokio_util::sync::CancellationToken;
36 : use tracing::{debug, info, trace, warn};
37 : use utils::bin_ser::DeserializeError;
38 : use utils::pausable_failpoint;
39 : use utils::{bin_ser::BeSer, lsn::Lsn};
40 :
41 : /// Max delta records appended to the AUX_FILES_KEY (for aux v1). The write path will write a full image once this threshold is reached.
42 : pub const MAX_AUX_FILE_DELTAS: usize = 1024;
43 :
44 : /// Max number of aux-file-related delta layers. The compaction will create a new image layer once this threshold is reached.
45 : pub const MAX_AUX_FILE_V2_DELTAS: usize = 64;
46 :
47 : #[derive(Debug)]
48 : pub enum LsnForTimestamp {
49 : /// Found commits both before and after the given timestamp
50 : Present(Lsn),
51 :
52 : /// Found no commits after the given timestamp; this means
53 : /// that the newest data in the branch is older than the given
54 : /// timestamp.
55 : ///
56 : /// All commits <= LSN happened before the given timestamp
57 : Future(Lsn),
58 :
59 : /// The queried timestamp is older than the horizon we can look back to (the PITR cutoff)
60 : ///
61 : /// All commits > LSN happened after the given timestamp,
62 : /// but any commits < LSN might have happened before or after
63 : /// the given timestamp. We don't know because no data before
64 : /// the given lsn is available.
65 : Past(Lsn),
66 :
67 : /// We have found no commit with a timestamp,
68 : /// so we can't return anything meaningful.
69 : ///
70 : /// The associated LSN is the lower bound value we can safely
71 : /// create branches on, but no statement is made about whether it
72 : /// is older or newer than the timestamp.
73 : ///
74 : /// This variant can e.g. be returned right after a
75 : /// cluster import.
76 : NoData(Lsn),
77 : }
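
/// A minimal, illustrative consumer of [`LsnForTimestamp`] (a sketch only; this helper is not
/// part of the pageserver API and is not called anywhere). It shows one conservative way a
/// branch-creation caller could reduce the result to a single LSN, following the variant
/// documentation above.
#[allow(dead_code)]
fn lsn_for_branch_creation_example(result: LsnForTimestamp) -> Lsn {
    match result {
        // Commits exist on both sides of the timestamp: this is the exact answer.
        LsnForTimestamp::Present(lsn) => lsn,
        // Everything on the branch is older than the timestamp: branch at the newest commit.
        LsnForTimestamp::Future(lsn) => lsn,
        // The timestamp is behind the PITR horizon, or no commit timestamps were found:
        // fall back to the lower-bound LSN that is safe to branch on.
        LsnForTimestamp::Past(lsn) | LsnForTimestamp::NoData(lsn) => lsn,
    }
}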
78 :
79 0 : #[derive(Debug, thiserror::Error)]
80 : pub(crate) enum CalculateLogicalSizeError {
81 : #[error("cancelled")]
82 : Cancelled,
83 :
84 : /// Something went wrong while reading the metadata we use to calculate logical size.
85 : /// Note that cancellation variants of `PageReconstructError` are transformed to [`Self::Cancelled`]
86 : /// in the `From` implementation for this variant.
87 : #[error(transparent)]
88 : PageRead(PageReconstructError),
89 :
90 : /// Something went wrong deserializing metadata that we read to calculate logical size
91 : #[error("decode error: {0}")]
92 : Decode(#[from] DeserializeError),
93 : }
94 :
95 0 : #[derive(Debug, thiserror::Error)]
96 : pub(crate) enum CollectKeySpaceError {
97 : #[error(transparent)]
98 : Decode(#[from] DeserializeError),
99 : #[error(transparent)]
100 : PageRead(PageReconstructError),
101 : #[error("cancelled")]
102 : Cancelled,
103 : }
104 :
105 : impl From<PageReconstructError> for CollectKeySpaceError {
106 0 : fn from(err: PageReconstructError) -> Self {
107 0 : match err {
108 0 : PageReconstructError::Cancelled => Self::Cancelled,
109 0 : err => Self::PageRead(err),
110 : }
111 0 : }
112 : }
113 :
114 : impl From<PageReconstructError> for CalculateLogicalSizeError {
115 0 : fn from(pre: PageReconstructError) -> Self {
116 0 : match pre {
117 0 : PageReconstructError::Cancelled => Self::Cancelled,
118 0 : _ => Self::PageRead(pre),
119 : }
120 0 : }
121 : }
122 :
123 0 : #[derive(Debug, thiserror::Error)]
124 : pub enum RelationError {
125 : #[error("Relation Already Exists")]
126 : AlreadyExists,
127 : #[error("invalid relnode")]
128 : InvalidRelnode,
129 : #[error(transparent)]
130 : Other(#[from] anyhow::Error),
131 : }
132 :
133 : ///
134 : /// This impl provides all the functionality to store PostgreSQL relations, SLRUs,
135 : /// and other special kinds of files, in a versioned key-value store. The
136 : /// Timeline struct provides the key-value store.
137 : ///
138 : /// This is a separate impl so that we can easily include all these functions in a Timeline
139 : /// implementation; it might be moved into a separate struct later.
140 : impl Timeline {
141 : /// Start ingesting a WAL record, or other atomic modification of
142 : /// the timeline.
143 : ///
144 : /// This provides a transaction-like interface to perform a bunch
145 : /// of modifications atomically.
146 : ///
147 : /// To ingest a WAL record, call begin_modification(lsn) to get a
148 : /// DatadirModification object. Use the functions in the object to
149 : /// modify the repository state, updating all the pages and metadata
150 : /// that the WAL record affects. When you're done, call commit() to
151 : /// commit the changes.
152 : ///
153 : /// Lsn stored in modification is advanced by `ingest_record` and
154 : /// is used by `commit()` to update `last_record_lsn`.
155 : ///
156 : /// Calling commit() will flush all the changes and reset the state,
157 : /// so the `DatadirModification` struct can be reused to perform the next modification.
158 : ///
159 : /// Note that any pending modifications you make through the
160 : /// modification object won't be visible to calls to the 'get' and list
161 : /// functions of the timeline until you commit! And if you update the
162 : /// same page twice, the last update wins.
163 : ///
164 805176 : pub fn begin_modification(&self, lsn: Lsn) -> DatadirModification
165 805176 : where
166 805176 : Self: Sized,
167 805176 : {
168 805176 : DatadirModification {
169 805176 : tline: self,
170 805176 : pending_lsns: Vec::new(),
171 805176 : pending_metadata_pages: HashMap::new(),
172 805176 : pending_data_pages: Vec::new(),
173 805176 : pending_zero_data_pages: Default::default(),
174 805176 : pending_deletions: Vec::new(),
175 805176 : pending_nblocks: 0,
176 805176 : pending_directory_entries: Vec::new(),
177 805176 : pending_bytes: 0,
178 805176 : lsn,
179 805176 : }
180 805176 : }
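
    // Illustrative ingest flow (kept as a comment because the real driver lives in
    // walingest.rs; the exact `commit` signature is an assumption here):
    //
    //   let mut modification = timeline.begin_modification(lsn);
    //   modification.put_rel_page_image(rel, blknum, img)?; // stage changes for this record
    //   modification.on_record_end();                       // finish the current WAL record
    //   modification.commit(ctx).await?;                    // flush and advance last_record_lsn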
181 :
182 : //------------------------------------------------------------------------------
183 : // Public GET functions
184 : //------------------------------------------------------------------------------
185 :
186 : /// Look up given page version.
187 55152 : pub(crate) async fn get_rel_page_at_lsn(
188 55152 : &self,
189 55152 : tag: RelTag,
190 55152 : blknum: BlockNumber,
191 55152 : version: Version<'_>,
192 55152 : ctx: &RequestContext,
193 55152 : ) -> Result<Bytes, PageReconstructError> {
194 55152 : if tag.relnode == 0 {
195 0 : return Err(PageReconstructError::Other(
196 0 : RelationError::InvalidRelnode.into(),
197 0 : ));
198 55152 : }
199 :
200 55152 : let nblocks = self.get_rel_size(tag, version, ctx).await?;
201 55152 : if blknum >= nblocks {
202 0 : debug!(
203 0 : "read beyond EOF at {} blk {} at {}, size is {}: returning all-zeros page",
204 0 : tag,
205 0 : blknum,
206 0 : version.get_lsn(),
207 : nblocks
208 : );
209 0 : return Ok(ZERO_PAGE.clone());
210 55152 : }
211 55152 :
212 55152 : let key = rel_block_to_key(tag, blknum);
213 55152 : version.get(self, key, ctx).await
214 55152 : }
215 :
216 : // Get size of a database in blocks
217 0 : pub(crate) async fn get_db_size(
218 0 : &self,
219 0 : spcnode: Oid,
220 0 : dbnode: Oid,
221 0 : version: Version<'_>,
222 0 : ctx: &RequestContext,
223 0 : ) -> Result<usize, PageReconstructError> {
224 0 : let mut total_blocks = 0;
225 :
226 0 : let rels = self.list_rels(spcnode, dbnode, version, ctx).await?;
227 :
228 0 : for rel in rels {
229 0 : let n_blocks = self.get_rel_size(rel, version, ctx).await?;
230 0 : total_blocks += n_blocks as usize;
231 : }
232 0 : Ok(total_blocks)
233 0 : }
234 :
235 : /// Get size of a relation file
236 73302 : pub(crate) async fn get_rel_size(
237 73302 : &self,
238 73302 : tag: RelTag,
239 73302 : version: Version<'_>,
240 73302 : ctx: &RequestContext,
241 73302 : ) -> Result<BlockNumber, PageReconstructError> {
242 73302 : if tag.relnode == 0 {
243 0 : return Err(PageReconstructError::Other(
244 0 : RelationError::InvalidRelnode.into(),
245 0 : ));
246 73302 : }
247 :
248 73302 : if let Some(nblocks) = self.get_cached_rel_size(&tag, version.get_lsn()) {
249 57882 : return Ok(nblocks);
250 15420 : }
251 15420 :
252 15420 : if (tag.forknum == FSM_FORKNUM || tag.forknum == VISIBILITYMAP_FORKNUM)
253 0 : && !self.get_rel_exists(tag, version, ctx).await?
254 : {
255 : // FIXME: Postgres sometimes calls smgrcreate() to create
256 : // FSM, and smgrnblocks() on it immediately afterwards,
257 : // without extending it. Tolerate that by claiming that
258 : // any non-existent FSM fork has size 0.
259 0 : return Ok(0);
260 15420 : }
261 15420 :
262 15420 : let key = rel_size_to_key(tag);
263 15420 : let mut buf = version.get(self, key, ctx).await?;
264 15408 : let nblocks = buf.get_u32_le();
265 15408 :
266 15408 : self.update_cached_rel_size(tag, version.get_lsn(), nblocks);
267 15408 :
268 15408 : Ok(nblocks)
269 73302 : }
270 :
271 : /// Does relation exist?
272 18150 : pub(crate) async fn get_rel_exists(
273 18150 : &self,
274 18150 : tag: RelTag,
275 18150 : version: Version<'_>,
276 18150 : ctx: &RequestContext,
277 18150 : ) -> Result<bool, PageReconstructError> {
278 18150 : if tag.relnode == 0 {
279 0 : return Err(PageReconstructError::Other(
280 0 : RelationError::InvalidRelnode.into(),
281 0 : ));
282 18150 : }
283 :
284 : // first try to lookup relation in cache
285 18150 : if let Some(_nblocks) = self.get_cached_rel_size(&tag, version.get_lsn()) {
286 18096 : return Ok(true);
287 54 : }
288 : // then check if the database was already initialized.
289 : // get_rel_exists can be called before dbdir is created.
290 54 : let buf = version.get(self, DBDIR_KEY, ctx).await?;
291 54 : let dbdirs = DbDirectory::des(&buf)?.dbdirs;
292 54 : if !dbdirs.contains_key(&(tag.spcnode, tag.dbnode)) {
293 0 : return Ok(false);
294 54 : }
295 54 : // fetch directory listing
296 54 : let key = rel_dir_to_key(tag.spcnode, tag.dbnode);
297 54 : let buf = version.get(self, key, ctx).await?;
298 :
299 54 : let dir = RelDirectory::des(&buf)?;
300 54 : Ok(dir.rels.contains(&(tag.relnode, tag.forknum)))
301 18150 : }
302 :
303 : /// Get a list of all existing relations in given tablespace and database.
304 : ///
305 : /// # Cancel-Safety
306 : ///
307 : /// This method is cancellation-safe.
308 0 : pub(crate) async fn list_rels(
309 0 : &self,
310 0 : spcnode: Oid,
311 0 : dbnode: Oid,
312 0 : version: Version<'_>,
313 0 : ctx: &RequestContext,
314 0 : ) -> Result<HashSet<RelTag>, PageReconstructError> {
315 0 : // fetch directory listing
316 0 : let key = rel_dir_to_key(spcnode, dbnode);
317 0 : let buf = version.get(self, key, ctx).await?;
318 :
319 0 : let dir = RelDirectory::des(&buf)?;
320 0 : let rels: HashSet<RelTag> =
321 0 : HashSet::from_iter(dir.rels.iter().map(|(relnode, forknum)| RelTag {
322 0 : spcnode,
323 0 : dbnode,
324 0 : relnode: *relnode,
325 0 : forknum: *forknum,
326 0 : }));
327 0 :
328 0 : Ok(rels)
329 0 : }
330 :
331 : /// Get the whole SLRU segment
332 0 : pub(crate) async fn get_slru_segment(
333 0 : &self,
334 0 : kind: SlruKind,
335 0 : segno: u32,
336 0 : lsn: Lsn,
337 0 : ctx: &RequestContext,
338 0 : ) -> Result<Bytes, PageReconstructError> {
339 0 : let n_blocks = self
340 0 : .get_slru_segment_size(kind, segno, Version::Lsn(lsn), ctx)
341 0 : .await?;
342 0 : let mut segment = BytesMut::with_capacity(n_blocks as usize * BLCKSZ as usize);
343 0 : for blkno in 0..n_blocks {
344 0 : let block = self
345 0 : .get_slru_page_at_lsn(kind, segno, blkno, lsn, ctx)
346 0 : .await?;
347 0 : segment.extend_from_slice(&block[..BLCKSZ as usize]);
348 : }
349 0 : Ok(segment.freeze())
350 0 : }
351 :
352 : /// Look up given SLRU page version.
353 0 : pub(crate) async fn get_slru_page_at_lsn(
354 0 : &self,
355 0 : kind: SlruKind,
356 0 : segno: u32,
357 0 : blknum: BlockNumber,
358 0 : lsn: Lsn,
359 0 : ctx: &RequestContext,
360 0 : ) -> Result<Bytes, PageReconstructError> {
361 0 : let key = slru_block_to_key(kind, segno, blknum);
362 0 : self.get(key, lsn, ctx).await
363 0 : }
364 :
365 : /// Get size of an SLRU segment
366 0 : pub(crate) async fn get_slru_segment_size(
367 0 : &self,
368 0 : kind: SlruKind,
369 0 : segno: u32,
370 0 : version: Version<'_>,
371 0 : ctx: &RequestContext,
372 0 : ) -> Result<BlockNumber, PageReconstructError> {
373 0 : let key = slru_segment_size_to_key(kind, segno);
374 0 : let mut buf = version.get(self, key, ctx).await?;
375 0 : Ok(buf.get_u32_le())
376 0 : }
377 :
378 : /// Does the SLRU segment exist?
379 0 : pub(crate) async fn get_slru_segment_exists(
380 0 : &self,
381 0 : kind: SlruKind,
382 0 : segno: u32,
383 0 : version: Version<'_>,
384 0 : ctx: &RequestContext,
385 0 : ) -> Result<bool, PageReconstructError> {
386 0 : // fetch directory listing
387 0 : let key = slru_dir_to_key(kind);
388 0 : let buf = version.get(self, key, ctx).await?;
389 :
390 0 : let dir = SlruSegmentDirectory::des(&buf)?;
391 0 : Ok(dir.segments.contains(&segno))
392 0 : }
393 :
394 : /// Locate the LSN such that all transactions that committed before
395 : /// 'search_timestamp' are visible, but nothing newer is.
396 : ///
397 : /// This is not exact. Commit timestamps are not guaranteed to be ordered,
398 : /// so it's not well defined which LSN you get if there were multiple commits
399 : /// "in flight" at that point in time.
400 : ///
401 0 : pub(crate) async fn find_lsn_for_timestamp(
402 0 : &self,
403 0 : search_timestamp: TimestampTz,
404 0 : cancel: &CancellationToken,
405 0 : ctx: &RequestContext,
406 0 : ) -> Result<LsnForTimestamp, PageReconstructError> {
407 0 : pausable_failpoint!("find-lsn-for-timestamp-pausable");
408 :
409 0 : let gc_cutoff_lsn_guard = self.get_latest_gc_cutoff_lsn();
410 0 : // We use this method to figure out the branching LSN for the new branch, but the
411 0 : // GC cutoff could be before the branching point and we cannot create a new branch
412 0 : // with LSN < `ancestor_lsn`. Thus, pick the maximum of these two to be
413 0 : // on the safe side.
414 0 : let min_lsn = std::cmp::max(*gc_cutoff_lsn_guard, self.get_ancestor_lsn());
415 0 : let max_lsn = self.get_last_record_lsn();
416 0 :
417 0 : // LSNs are always 8-byte aligned. low/mid/high represent the
418 0 : // LSN divided by 8.
419 0 : let mut low = min_lsn.0 / 8;
420 0 : let mut high = max_lsn.0 / 8 + 1;
421 0 :
422 0 : let mut found_smaller = false;
423 0 : let mut found_larger = false;
424 :
425 0 : while low < high {
426 0 : if cancel.is_cancelled() {
427 0 : return Err(PageReconstructError::Cancelled);
428 0 : }
429 0 : // cannot overflow, high and low are both smaller than u64::MAX / 2
430 0 : let mid = (high + low) / 2;
431 :
432 0 : let cmp = self
433 0 : .is_latest_commit_timestamp_ge_than(
434 0 : search_timestamp,
435 0 : Lsn(mid * 8),
436 0 : &mut found_smaller,
437 0 : &mut found_larger,
438 0 : ctx,
439 0 : )
440 0 : .await?;
441 :
442 0 : if cmp {
443 0 : high = mid;
444 0 : } else {
445 0 : low = mid + 1;
446 0 : }
447 : }
448 : // If `found_smaller == true`, `low = t + 1` where `t` is the target LSN,
449 : // so the LSN of the last commit record before or at `search_timestamp`.
450 : // Remove one from `low` to get `t`.
451 : //
452 : // FIXME: it would be better to get the LSN of the previous commit.
453 : // Otherwise, if you restore to the returned LSN, the database will
454 : // include physical changes from later commits that will be marked
455 : // as aborted, and will need to be vacuumed away.
456 0 : let commit_lsn = Lsn((low - 1) * 8);
457 0 : match (found_smaller, found_larger) {
458 : (false, false) => {
459 : // This can happen if no commit records have been processed yet, e.g.
460 : // just after importing a cluster.
461 0 : Ok(LsnForTimestamp::NoData(min_lsn))
462 : }
463 : (false, true) => {
464 : // Didn't find any commit timestamps smaller than the request
465 0 : Ok(LsnForTimestamp::Past(min_lsn))
466 : }
467 0 : (true, _) if commit_lsn < min_lsn => {
468 0 : // the search above did set found_smaller to true but it never increased the lsn.
469 0 : // Then, low is still the old min_lsn, and the subtraction above gave a value
470 0 : // below the min_lsn. We should never do that.
471 0 : Ok(LsnForTimestamp::Past(min_lsn))
472 : }
473 : (true, false) => {
474 : // Only found commits with timestamps smaller than the request.
475 : // It's still a valid case for branch creation, return it.
476 : // And `update_gc_info()` ignores LSN for a `LsnForTimestamp::Future`
477 : // case, anyway.
478 0 : Ok(LsnForTimestamp::Future(commit_lsn))
479 : }
480 0 : (true, true) => Ok(LsnForTimestamp::Present(commit_lsn)),
481 : }
482 0 : }
483 :
484 : /// Subroutine of find_lsn_for_timestamp(). Returns true if there are any
485 : /// commits that committed after 'search_timestamp', at LSN 'probe_lsn'.
486 : ///
487 : /// Additionally, sets 'found_smaller'/'found_larger' if it encounters any commits
488 : /// with a smaller/larger timestamp.
489 : ///
490 0 : pub(crate) async fn is_latest_commit_timestamp_ge_than(
491 0 : &self,
492 0 : search_timestamp: TimestampTz,
493 0 : probe_lsn: Lsn,
494 0 : found_smaller: &mut bool,
495 0 : found_larger: &mut bool,
496 0 : ctx: &RequestContext,
497 0 : ) -> Result<bool, PageReconstructError> {
498 0 : self.map_all_timestamps(probe_lsn, ctx, |timestamp| {
499 0 : if timestamp >= search_timestamp {
500 0 : *found_larger = true;
501 0 : return ControlFlow::Break(true);
502 0 : } else {
503 0 : *found_smaller = true;
504 0 : }
505 0 : ControlFlow::Continue(())
506 0 : })
507 0 : .await
508 0 : }
509 :
510 : /// Obtain the latest commit timestamp known at the given lsn.
511 : ///
512 : /// If the lsn has no commit timestamps, returns None; otherwise returns the largest timestamp found.
513 0 : pub(crate) async fn get_timestamp_for_lsn(
514 0 : &self,
515 0 : probe_lsn: Lsn,
516 0 : ctx: &RequestContext,
517 0 : ) -> Result<Option<TimestampTz>, PageReconstructError> {
518 0 : let mut max: Option<TimestampTz> = None;
519 0 : self.map_all_timestamps::<()>(probe_lsn, ctx, |timestamp| {
520 0 : if let Some(max_prev) = max {
521 0 : max = Some(max_prev.max(timestamp));
522 0 : } else {
523 0 : max = Some(timestamp);
524 0 : }
525 0 : ControlFlow::Continue(())
526 0 : })
527 0 : .await?;
528 :
529 0 : Ok(max)
530 0 : }
531 :
532 : /// Runs the given function on all the timestamps for a given lsn
533 : ///
534 : /// The return value is either given by the closure, or set to the `Default`
535 : /// impl's output.
536 0 : async fn map_all_timestamps<T: Default>(
537 0 : &self,
538 0 : probe_lsn: Lsn,
539 0 : ctx: &RequestContext,
540 0 : mut f: impl FnMut(TimestampTz) -> ControlFlow<T>,
541 0 : ) -> Result<T, PageReconstructError> {
542 0 : for segno in self
543 0 : .list_slru_segments(SlruKind::Clog, Version::Lsn(probe_lsn), ctx)
544 0 : .await?
545 : {
546 0 : let nblocks = self
547 0 : .get_slru_segment_size(SlruKind::Clog, segno, Version::Lsn(probe_lsn), ctx)
548 0 : .await?;
549 0 : for blknum in (0..nblocks).rev() {
550 0 : let clog_page = self
551 0 : .get_slru_page_at_lsn(SlruKind::Clog, segno, blknum, probe_lsn, ctx)
552 0 : .await?;
553 :
554 0 : if clog_page.len() == BLCKSZ as usize + 8 {
555 0 : let mut timestamp_bytes = [0u8; 8];
556 0 : timestamp_bytes.copy_from_slice(&clog_page[BLCKSZ as usize..]);
557 0 : let timestamp = TimestampTz::from_be_bytes(timestamp_bytes);
558 0 :
559 0 : match f(timestamp) {
560 0 : ControlFlow::Break(b) => return Ok(b),
561 0 : ControlFlow::Continue(()) => (),
562 : }
563 0 : }
564 : }
565 : }
566 0 : Ok(Default::default())
567 0 : }
568 :
569 0 : pub(crate) async fn get_slru_keyspace(
570 0 : &self,
571 0 : version: Version<'_>,
572 0 : ctx: &RequestContext,
573 0 : ) -> Result<KeySpace, PageReconstructError> {
574 0 : let mut accum = KeySpaceAccum::new();
575 :
576 0 : for kind in SlruKind::iter() {
577 0 : let mut segments: Vec<u32> = self
578 0 : .list_slru_segments(kind, version, ctx)
579 0 : .await?
580 0 : .into_iter()
581 0 : .collect();
582 0 : segments.sort_unstable();
583 :
584 0 : for seg in segments {
585 0 : let block_count = self.get_slru_segment_size(kind, seg, version, ctx).await?;
586 :
587 0 : accum.add_range(
588 0 : slru_block_to_key(kind, seg, 0)..slru_block_to_key(kind, seg, block_count),
589 0 : );
590 : }
591 : }
592 :
593 0 : Ok(accum.to_keyspace())
594 0 : }
595 :
596 : /// Get a list of SLRU segments
597 0 : pub(crate) async fn list_slru_segments(
598 0 : &self,
599 0 : kind: SlruKind,
600 0 : version: Version<'_>,
601 0 : ctx: &RequestContext,
602 0 : ) -> Result<HashSet<u32>, PageReconstructError> {
603 0 : // fetch directory entry
604 0 : let key = slru_dir_to_key(kind);
605 :
606 0 : let buf = version.get(self, key, ctx).await?;
607 0 : Ok(SlruSegmentDirectory::des(&buf)?.segments)
608 0 : }
609 :
610 0 : pub(crate) async fn get_relmap_file(
611 0 : &self,
612 0 : spcnode: Oid,
613 0 : dbnode: Oid,
614 0 : version: Version<'_>,
615 0 : ctx: &RequestContext,
616 0 : ) -> Result<Bytes, PageReconstructError> {
617 0 : let key = relmap_file_key(spcnode, dbnode);
618 :
619 0 : let buf = version.get(self, key, ctx).await?;
620 0 : Ok(buf)
621 0 : }
622 :
623 864 : pub(crate) async fn list_dbdirs(
624 864 : &self,
625 864 : lsn: Lsn,
626 864 : ctx: &RequestContext,
627 864 : ) -> Result<HashMap<(Oid, Oid), bool>, PageReconstructError> {
628 : // fetch directory entry
629 9311 : let buf = self.get(DBDIR_KEY, lsn, ctx).await?;
630 :
631 864 : Ok(DbDirectory::des(&buf)?.dbdirs)
632 864 : }
633 :
634 0 : pub(crate) async fn get_twophase_file(
635 0 : &self,
636 0 : xid: u64,
637 0 : lsn: Lsn,
638 0 : ctx: &RequestContext,
639 0 : ) -> Result<Bytes, PageReconstructError> {
640 0 : let key = twophase_file_key(xid);
641 0 : let buf = self.get(key, lsn, ctx).await?;
642 0 : Ok(buf)
643 0 : }
644 :
645 870 : pub(crate) async fn list_twophase_files(
646 870 : &self,
647 870 : lsn: Lsn,
648 870 : ctx: &RequestContext,
649 870 : ) -> Result<HashSet<u64>, PageReconstructError> {
650 : // fetch directory entry
651 9428 : let buf = self.get(TWOPHASEDIR_KEY, lsn, ctx).await?;
652 :
653 870 : if self.pg_version >= 17 {
654 0 : Ok(TwoPhaseDirectoryV17::des(&buf)?.xids)
655 : } else {
656 870 : Ok(TwoPhaseDirectory::des(&buf)?
657 : .xids
658 870 : .iter()
659 870 : .map(|x| u64::from(*x))
660 870 : .collect())
661 : }
662 870 : }
663 :
664 0 : pub(crate) async fn get_control_file(
665 0 : &self,
666 0 : lsn: Lsn,
667 0 : ctx: &RequestContext,
668 0 : ) -> Result<Bytes, PageReconstructError> {
669 0 : self.get(CONTROLFILE_KEY, lsn, ctx).await
670 0 : }
671 :
672 36 : pub(crate) async fn get_checkpoint(
673 36 : &self,
674 36 : lsn: Lsn,
675 36 : ctx: &RequestContext,
676 36 : ) -> Result<Bytes, PageReconstructError> {
677 36 : self.get(CHECKPOINT_KEY, lsn, ctx).await
678 36 : }
679 :
680 48 : async fn list_aux_files_v1(
681 48 : &self,
682 48 : lsn: Lsn,
683 48 : ctx: &RequestContext,
684 48 : ) -> Result<HashMap<String, Bytes>, PageReconstructError> {
685 48 : match self.get(AUX_FILES_KEY, lsn, ctx).await {
686 30 : Ok(buf) => Ok(AuxFilesDirectory::des(&buf)?.files),
687 18 : Err(e) => {
688 18 : // This is expected: historical databases do not have the key.
689 18 : debug!("Failed to get info about AUX files: {}", e);
690 18 : Ok(HashMap::new())
691 : }
692 : }
693 48 : }
694 :
695 72 : async fn list_aux_files_v2(
696 72 : &self,
697 72 : lsn: Lsn,
698 72 : ctx: &RequestContext,
699 72 : ) -> Result<HashMap<String, Bytes>, PageReconstructError> {
700 72 : let kv = self
701 72 : .scan(KeySpace::single(Key::metadata_aux_key_range()), lsn, ctx)
702 0 : .await?;
703 72 : let mut result = HashMap::new();
704 72 : let mut sz = 0;
705 180 : for (_, v) in kv {
706 108 : let v = v?;
707 108 : let v = aux_file::decode_file_value_bytes(&v)
708 108 : .context("value decode")
709 108 : .map_err(PageReconstructError::Other)?;
710 210 : for (fname, content) in v {
711 102 : sz += fname.len();
712 102 : sz += content.len();
713 102 : result.insert(fname, content);
714 102 : }
715 : }
716 72 : self.aux_file_size_estimator.on_initial(sz);
717 72 : Ok(result)
718 72 : }
719 :
720 0 : pub(crate) async fn trigger_aux_file_size_computation(
721 0 : &self,
722 0 : lsn: Lsn,
723 0 : ctx: &RequestContext,
724 0 : ) -> Result<(), PageReconstructError> {
725 0 : let current_policy = self.last_aux_file_policy.load();
726 0 : if let Some(AuxFilePolicy::V2) | Some(AuxFilePolicy::CrossValidation) = current_policy {
727 0 : self.list_aux_files_v2(lsn, ctx).await?;
728 0 : }
729 0 : Ok(())
730 0 : }
731 :
732 78 : pub(crate) async fn list_aux_files(
733 78 : &self,
734 78 : lsn: Lsn,
735 78 : ctx: &RequestContext,
736 78 : ) -> Result<HashMap<String, Bytes>, PageReconstructError> {
737 78 : let current_policy = self.last_aux_file_policy.load();
738 78 : match current_policy {
739 : Some(AuxFilePolicy::V1) => {
740 6 : let res = self.list_aux_files_v1(lsn, ctx).await?;
741 6 : let empty_str = if res.is_empty() { ", empty" } else { "" };
742 6 : warn!(
743 0 : "this timeline is using deprecated aux file policy V1 (policy=v1{empty_str})"
744 : );
745 6 : Ok(res)
746 : }
747 : None => {
748 0 : let res = self.list_aux_files_v1(lsn, ctx).await?;
749 0 : if !res.is_empty() {
750 0 : warn!("this timeline is using deprecated aux file policy V1 (policy=None)");
751 0 : }
752 0 : Ok(res)
753 : }
754 66 : Some(AuxFilePolicy::V2) => self.list_aux_files_v2(lsn, ctx).await,
755 : Some(AuxFilePolicy::CrossValidation) => {
756 6 : let v1_result = self.list_aux_files_v1(lsn, ctx).await;
757 6 : let v2_result = self.list_aux_files_v2(lsn, ctx).await;
758 6 : match (v1_result, v2_result) {
759 6 : (Ok(v1), Ok(v2)) => {
760 6 : if v1 != v2 {
761 0 : tracing::error!(
762 0 : "unmatched aux file v1 v2 result:\nv1 {v1:?}\nv2 {v2:?}"
763 : );
764 0 : return Err(PageReconstructError::Other(anyhow::anyhow!(
765 0 : "unmatched aux file v1 v2 result"
766 0 : )));
767 6 : }
768 6 : Ok(v1)
769 : }
770 0 : (Ok(_), Err(v2)) => {
771 0 : tracing::error!("aux file v1 returns Ok while aux file v2 returns an err");
772 0 : Err(v2)
773 : }
774 0 : (Err(v1), Ok(_)) => {
775 0 : tracing::error!("aux file v2 returns Ok while aux file v1 returns an err");
776 0 : Err(v1)
777 : }
778 0 : (Err(_), Err(v2)) => Err(v2),
779 : }
780 : }
781 : }
782 78 : }
783 :
784 0 : pub(crate) async fn get_replorigins(
785 0 : &self,
786 0 : lsn: Lsn,
787 0 : ctx: &RequestContext,
788 0 : ) -> Result<HashMap<RepOriginId, Lsn>, PageReconstructError> {
789 0 : let kv = self
790 0 : .scan(KeySpace::single(repl_origin_key_range()), lsn, ctx)
791 0 : .await?;
792 0 : let mut result = HashMap::new();
793 0 : for (k, v) in kv {
794 0 : let v = v?;
795 0 : let origin_id = k.field6 as RepOriginId;
796 0 : let origin_lsn = Lsn::des(&v).unwrap();
797 0 : if origin_lsn != Lsn::INVALID {
798 0 : result.insert(origin_id, origin_lsn);
799 0 : }
800 : }
801 0 : Ok(result)
802 0 : }
803 :
804 : /// Does the same as get_current_logical_size, but computes the size on demand.
805 : /// Used to initialize the logical size tracking on startup.
806 : ///
807 : /// Only relation blocks are counted currently. That excludes metadata,
808 : /// SLRUs, twophase files etc.
809 : ///
810 : /// # Cancel-Safety
811 : ///
812 : /// This method is cancellation-safe.
813 0 : pub(crate) async fn get_current_logical_size_non_incremental(
814 0 : &self,
815 0 : lsn: Lsn,
816 0 : ctx: &RequestContext,
817 0 : ) -> Result<u64, CalculateLogicalSizeError> {
818 0 : debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id();
819 :
820 : // Fetch list of database dirs and iterate them
821 0 : let buf = self.get(DBDIR_KEY, lsn, ctx).await?;
822 0 : let dbdir = DbDirectory::des(&buf)?;
823 :
824 0 : let mut total_size: u64 = 0;
825 0 : for (spcnode, dbnode) in dbdir.dbdirs.keys() {
826 0 : for rel in self
827 0 : .list_rels(*spcnode, *dbnode, Version::Lsn(lsn), ctx)
828 0 : .await?
829 : {
830 0 : if self.cancel.is_cancelled() {
831 0 : return Err(CalculateLogicalSizeError::Cancelled);
832 0 : }
833 0 : let relsize_key = rel_size_to_key(rel);
834 0 : let mut buf = self.get(relsize_key, lsn, ctx).await?;
835 0 : let relsize = buf.get_u32_le();
836 0 :
837 0 : total_size += relsize as u64;
838 : }
839 : }
840 0 : Ok(total_size * BLCKSZ as u64)
841 0 : }
842 :
843 : ///
844 : /// Get a KeySpace that covers all the Keys that are in use at the given LSN.
845 : /// Anything that's not listed may be removed from the underlying storage (from
846 : /// that LSN forwards).
847 : ///
848 : /// The return value is (dense keyspace, sparse keyspace).
849 864 : pub(crate) async fn collect_keyspace(
850 864 : &self,
851 864 : lsn: Lsn,
852 864 : ctx: &RequestContext,
853 864 : ) -> Result<(KeySpace, SparseKeySpace), CollectKeySpaceError> {
854 864 : // Iterate through key ranges, greedily packing them into partitions
855 864 : let mut result = KeySpaceAccum::new();
856 864 :
857 864 : // The dbdir metadata always exists
858 864 : result.add_key(DBDIR_KEY);
859 :
860 : // Fetch list of database dirs and iterate them
861 9311 : let dbdir = self.list_dbdirs(lsn, ctx).await?;
862 864 : let mut dbs: Vec<((Oid, Oid), bool)> = dbdir.into_iter().collect();
863 864 :
864 864 : dbs.sort_unstable_by(|(k_a, _), (k_b, _)| k_a.cmp(k_b));
865 864 : for ((spcnode, dbnode), has_relmap_file) in dbs {
866 0 : if has_relmap_file {
867 0 : result.add_key(relmap_file_key(spcnode, dbnode));
868 0 : }
869 0 : result.add_key(rel_dir_to_key(spcnode, dbnode));
870 :
871 0 : let mut rels: Vec<RelTag> = self
872 0 : .list_rels(spcnode, dbnode, Version::Lsn(lsn), ctx)
873 0 : .await?
874 0 : .into_iter()
875 0 : .collect();
876 0 : rels.sort_unstable();
877 0 : for rel in rels {
878 0 : let relsize_key = rel_size_to_key(rel);
879 0 : let mut buf = self.get(relsize_key, lsn, ctx).await?;
880 0 : let relsize = buf.get_u32_le();
881 0 :
882 0 : result.add_range(rel_block_to_key(rel, 0)..rel_block_to_key(rel, relsize));
883 0 : result.add_key(relsize_key);
884 : }
885 : }
886 :
887 : // Iterate SLRUs next
888 2592 : for kind in [
889 864 : SlruKind::Clog,
890 864 : SlruKind::MultiXactMembers,
891 864 : SlruKind::MultiXactOffsets,
892 : ] {
893 2592 : let slrudir_key = slru_dir_to_key(kind);
894 2592 : result.add_key(slrudir_key);
895 28653 : let buf = self.get(slrudir_key, lsn, ctx).await?;
896 2592 : let dir = SlruSegmentDirectory::des(&buf)?;
897 2592 : let mut segments: Vec<u32> = dir.segments.iter().cloned().collect();
898 2592 : segments.sort_unstable();
899 2592 : for segno in segments {
900 0 : let segsize_key = slru_segment_size_to_key(kind, segno);
901 0 : let mut buf = self.get(segsize_key, lsn, ctx).await?;
902 0 : let segsize = buf.get_u32_le();
903 0 :
904 0 : result.add_range(
905 0 : slru_block_to_key(kind, segno, 0)..slru_block_to_key(kind, segno, segsize),
906 0 : );
907 0 : result.add_key(segsize_key);
908 : }
909 : }
910 :
911 : // Then pg_twophase
912 864 : result.add_key(TWOPHASEDIR_KEY);
913 :
914 864 : let mut xids: Vec<u64> = self
915 864 : .list_twophase_files(lsn, ctx)
916 9426 : .await?
917 864 : .iter()
918 864 : .cloned()
919 864 : .collect();
920 864 : xids.sort_unstable();
921 864 : for xid in xids {
922 0 : result.add_key(twophase_file_key(xid));
923 0 : }
924 :
925 864 : result.add_key(CONTROLFILE_KEY);
926 864 : result.add_key(CHECKPOINT_KEY);
927 864 : if self.get(AUX_FILES_KEY, lsn, ctx).await.is_ok() {
928 12 : result.add_key(AUX_FILES_KEY);
929 852 : }
930 :
931 : // Add extra keyspaces in the test cases. Some test cases write keys into the storage without
932 : // creating directory keys. These test cases will add such keyspaces into `extra_test_dense_keyspace`
933 : // and the keys will not be garbage-collected.
934 : #[cfg(test)]
935 : {
936 864 : let guard = self.extra_test_dense_keyspace.load();
937 864 : for kr in &guard.ranges {
938 0 : result.add_range(kr.clone());
939 0 : }
940 : }
941 :
942 864 : let dense_keyspace = result.to_keyspace();
943 864 : let sparse_keyspace = SparseKeySpace(KeySpace {
944 864 : ranges: vec![Key::metadata_aux_key_range(), repl_origin_key_range()],
945 864 : });
946 864 :
947 864 : if cfg!(debug_assertions) {
948 : // Verify if the sparse keyspaces are ordered and non-overlapping.
949 :
950 : // We do not use KeySpaceAccum for sparse_keyspace because we want to ensure each
951 : // category of sparse keys are split into their own image/delta files. If there
952 : // are overlapping keyspaces, they will be automatically merged by keyspace accum,
953 : // and we want the developer to keep the keyspaces separated.
954 :
955 864 : let ranges = &sparse_keyspace.0.ranges;
956 :
957 : // TODO: use a single overlaps_with across the codebase
958 864 : fn overlaps_with<T: Ord>(a: &Range<T>, b: &Range<T>) -> bool {
959 864 : !(a.end <= b.start || b.end <= a.start)
960 864 : }
961 1728 : for i in 0..ranges.len() {
962 1728 : for j in 0..i {
963 864 : if overlaps_with(&ranges[i], &ranges[j]) {
964 0 : panic!(
965 0 : "overlapping sparse keyspace: {}..{} and {}..{}",
966 0 : ranges[i].start, ranges[i].end, ranges[j].start, ranges[j].end
967 0 : );
968 864 : }
969 : }
970 : }
971 864 : for i in 1..ranges.len() {
972 864 : assert!(
973 864 : ranges[i - 1].end <= ranges[i].start,
974 0 : "unordered sparse keyspace: {}..{} and {}..{}",
975 0 : ranges[i - 1].start,
976 0 : ranges[i - 1].end,
977 0 : ranges[i].start,
978 0 : ranges[i].end
979 : );
980 : }
981 0 : }
982 :
983 864 : Ok((dense_keyspace, sparse_keyspace))
984 864 : }
985 :
986 : /// Get the cached size of a relation, if it has not been updated after the specified LSN
987 1345620 : pub fn get_cached_rel_size(&self, tag: &RelTag, lsn: Lsn) -> Option<BlockNumber> {
988 1345620 : let rel_size_cache = self.rel_size_cache.read().unwrap();
989 1345620 : if let Some((cached_lsn, nblocks)) = rel_size_cache.map.get(tag) {
990 1345554 : if lsn >= *cached_lsn {
991 1330116 : return Some(*nblocks);
992 15438 : }
993 66 : }
994 15504 : None
995 1345620 : }
996 :
997 : /// Update cached relation size if there is no more recent update
998 15408 : pub fn update_cached_rel_size(&self, tag: RelTag, lsn: Lsn, nblocks: BlockNumber) {
999 15408 : let mut rel_size_cache = self.rel_size_cache.write().unwrap();
1000 15408 :
1001 15408 : if lsn < rel_size_cache.complete_as_of {
1002 : // Do not cache old values. It's safe to cache the size on read, as long as
1003 : // the read was at an LSN since we started the WAL ingestion. Reasoning: we
1004 : // never evict values from the cache, so if the relation size changed after
1005 : // 'lsn', the new value is already in the cache.
1006 0 : return;
1007 15408 : }
1008 15408 :
1009 15408 : match rel_size_cache.map.entry(tag) {
1010 15408 : hash_map::Entry::Occupied(mut entry) => {
1011 15408 : let cached_lsn = entry.get_mut();
1012 15408 : if lsn >= cached_lsn.0 {
1013 0 : *cached_lsn = (lsn, nblocks);
1014 15408 : }
1015 : }
1016 0 : hash_map::Entry::Vacant(entry) => {
1017 0 : entry.insert((lsn, nblocks));
1018 0 : }
1019 : }
1020 15408 : }
1021 :
1022 : /// Store cached relation size
1023 866196 : pub fn set_cached_rel_size(&self, tag: RelTag, lsn: Lsn, nblocks: BlockNumber) {
1024 866196 : let mut rel_size_cache = self.rel_size_cache.write().unwrap();
1025 866196 : rel_size_cache.map.insert(tag, (lsn, nblocks));
1026 866196 : }
1027 :
1028 : /// Remove cached relation size
1029 6 : pub fn remove_cached_rel_size(&self, tag: &RelTag) {
1030 6 : let mut rel_size_cache = self.rel_size_cache.write().unwrap();
1031 6 : rel_size_cache.map.remove(tag);
1032 6 : }
1033 : }
1034 :
1035 : /// DatadirModification represents an operation to ingest an atomic set of
1036 : /// updates to the repository.
1037 : ///
1038 : /// It is created by the 'begin_modification' function. It is called for each WAL
1039 : /// record, so that all the modifications by one WAL record appear atomic.
1040 : pub struct DatadirModification<'a> {
1041 : /// The timeline this modification applies to. You can access this to
1042 : /// read the state, but note that any pending updates are *not* reflected
1043 : /// in the state in 'tline' yet.
1044 : pub tline: &'a Timeline,
1045 :
1046 : /// Current LSN of the modification
1047 : lsn: Lsn,
1048 :
1049 : // The modifications are not applied directly to the underlying key-value store.
1050 : // The put-functions add the modifications here, and they are flushed to the
1051 : // underlying key-value store by the 'commit' function.
1052 : pending_lsns: Vec<Lsn>,
1053 : pending_deletions: Vec<(Range<Key>, Lsn)>,
1054 : pending_nblocks: i64,
1055 :
1056 : /// Metadata writes, indexed by key so that they can be read from not-yet-committed modifications
1057 : /// while ingesting subsequent records. See [`Self::is_data_key`] for the definition of 'metadata'.
1058 : pending_metadata_pages: HashMap<CompactKey, Vec<(Lsn, usize, Value)>>,
1059 :
1060 : /// Data writes, ready to be flushed into an ephemeral layer. See [`Self::is_data_key`] for
1061 : /// which keys are stored here.
1062 : pending_data_pages: Vec<(CompactKey, Lsn, usize, Value)>,
1063 :
1064 : // Sometimes during ingest, for example when extending a relation, we would like to write a zero page. However,
1065 : // if we encounter a write from postgres in the same wal record, we will drop this entry.
1066 : //
1067 : // Unlike other 'pending' fields, this does not last until the next call to commit(): it is flushed
1068 : // at the end of each WAL record, and all these writes are implicitly at LSN `Self::lsn`
1069 : pending_zero_data_pages: HashSet<CompactKey>,
1070 :
1071 : /// For special "directory" keys that store key-value maps, track the size of the map
1072 : /// if it was updated in this modification.
1073 : pending_directory_entries: Vec<(DirectoryKind, usize)>,
1074 :
1075 : /// An **approximation** of how large our EphemeralFile write will be when committed.
1076 : pending_bytes: usize,
1077 : }
1078 :
1079 : impl<'a> DatadirModification<'a> {
1080 : // When a DatadirModification is committed, we do a monolithic serialization of all its contents. WAL records can
1081 : // contain multiple pages, so the pageserver's record-based batch size isn't sufficient to bound this allocation: we
1082 : // additionally specify a limit on how much payload a DatadirModification may contain before it should be committed.
1083 : pub(crate) const MAX_PENDING_BYTES: usize = 8 * 1024 * 1024;
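
    // Illustrative use of this limit (a sketch; the actual batching logic lives in the ingest
    // code and may differ, and the `commit` signature is an assumption):
    //
    //   if modification.approx_pending_bytes() >= DatadirModification::MAX_PENDING_BYTES {
    //       // Flush the accumulated payload before it grows past the limit; the same
    //       // DatadirModification can then be reused for subsequent records.
    //       modification.commit(ctx).await?;
    //   }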
1084 :
1085 : /// Get the current lsn
1086 1254168 : pub(crate) fn get_lsn(&self) -> Lsn {
1087 1254168 : self.lsn
1088 1254168 : }
1089 :
1090 0 : pub(crate) fn approx_pending_bytes(&self) -> usize {
1091 0 : self.pending_bytes
1092 0 : }
1093 :
1094 0 : pub(crate) fn has_dirty_data_pages(&self) -> bool {
1095 0 : (!self.pending_data_pages.is_empty()) || (!self.pending_zero_data_pages.is_empty())
1096 0 : }
1097 :
1098 : /// Set the current lsn
1099 437574 : pub(crate) fn set_lsn(&mut self, lsn: Lsn) -> anyhow::Result<()> {
1100 437574 : ensure!(
1101 437574 : lsn >= self.lsn,
1102 0 : "setting an older lsn {} than {} is not allowed",
1103 : lsn,
1104 : self.lsn
1105 : );
1106 :
1107 : // If we are advancing LSN, then state from previous wal record should have been flushed.
1108 437574 : assert!(self.pending_zero_data_pages.is_empty());
1109 :
1110 437574 : if lsn > self.lsn {
1111 437574 : self.pending_lsns.push(self.lsn);
1112 437574 : self.lsn = lsn;
1113 437574 : }
1114 437574 : Ok(())
1115 437574 : }
1116 :
1117 : /// In this context, 'metadata' means keys that are only read by the pageserver internally, and 'data' means
1118 : /// keys that represent literal blocks that postgres can read. So data includes relation blocks and
1119 : /// SLRU blocks, which are read directly by postgres, and everything else is considered metadata.
1120 : ///
1121 : /// The distinction is important because data keys are handled on a fast path where dirty writes are
1122 : /// not readable until this modification is committed, whereas metadata keys are visible for read
1123 : /// via [`Self::get`] as soon as their record has been ingested.
1124 2988252 : fn is_data_key(key: &Key) -> bool {
1125 2988252 : key.is_rel_block_key() || key.is_slru_block_key()
1126 2988252 : }
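
    // For example (a comment-only illustration of the definition above):
    //   - rel_block_to_key(rel, blknum) and slru_block_to_key(kind, segno, blknum) are 'data'
    //     keys: they hold literal postgres blocks and take the fast write path.
    //   - rel_size_to_key(rel), rel_dir_to_key(..) and DBDIR_KEY are 'metadata' keys: they
    //     remain readable through `Self::get` while this modification is still pending.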
1127 :
1128 : /// Initialize a completely new repository.
1129 : ///
1130 : /// This inserts the directory metadata entries that are assumed to
1131 : /// always exist.
1132 522 : pub fn init_empty(&mut self) -> anyhow::Result<()> {
1133 522 : let buf = DbDirectory::ser(&DbDirectory {
1134 522 : dbdirs: HashMap::new(),
1135 522 : })?;
1136 522 : self.pending_directory_entries.push((DirectoryKind::Db, 0));
1137 522 : self.put(DBDIR_KEY, Value::Image(buf.into()));
1138 522 :
1139 522 : // Create AuxFilesDirectory
1140 522 : self.init_aux_dir()?;
1141 :
1142 522 : let buf = if self.tline.pg_version >= 17 {
1143 0 : TwoPhaseDirectoryV17::ser(&TwoPhaseDirectoryV17 {
1144 0 : xids: HashSet::new(),
1145 0 : })
1146 : } else {
1147 522 : TwoPhaseDirectory::ser(&TwoPhaseDirectory {
1148 522 : xids: HashSet::new(),
1149 522 : })
1150 0 : }?;
1151 522 : self.pending_directory_entries
1152 522 : .push((DirectoryKind::TwoPhase, 0));
1153 522 : self.put(TWOPHASEDIR_KEY, Value::Image(buf.into()));
1154 :
1155 522 : let buf: Bytes = SlruSegmentDirectory::ser(&SlruSegmentDirectory::default())?.into();
1156 522 : let empty_dir = Value::Image(buf);
1157 522 : self.put(slru_dir_to_key(SlruKind::Clog), empty_dir.clone());
1158 522 : self.pending_directory_entries
1159 522 : .push((DirectoryKind::SlruSegment(SlruKind::Clog), 0));
1160 522 : self.put(
1161 522 : slru_dir_to_key(SlruKind::MultiXactMembers),
1162 522 : empty_dir.clone(),
1163 522 : );
1164 522 : self.pending_directory_entries
1165 522 : .push((DirectoryKind::SlruSegment(SlruKind::Clog), 0));
1166 522 : self.put(slru_dir_to_key(SlruKind::MultiXactOffsets), empty_dir);
1167 522 : self.pending_directory_entries
1168 522 : .push((DirectoryKind::SlruSegment(SlruKind::MultiXactOffsets), 0));
1169 522 :
1170 522 : Ok(())
1171 522 : }
1172 :
1173 : #[cfg(test)]
1174 516 : pub fn init_empty_test_timeline(&mut self) -> anyhow::Result<()> {
1175 516 : self.init_empty()?;
1176 516 : self.put_control_file(bytes::Bytes::from_static(
1177 516 : b"control_file contents do not matter",
1178 516 : ))
1179 516 : .context("put_control_file")?;
1180 516 : self.put_checkpoint(bytes::Bytes::from_static(
1181 516 : b"checkpoint_file contents do not matter",
1182 516 : ))
1183 516 : .context("put_checkpoint_file")?;
1184 516 : Ok(())
1185 516 : }
1186 :
1187 : /// Put a new page version that can be constructed from a WAL record
1188 : ///
1189 : /// NOTE: this will *not* implicitly extend the relation, if the page is beyond the
1190 : /// current end-of-file. It's up to the caller to check that the relation size
1191 : /// matches the blocks inserted!
1192 436890 : pub fn put_rel_wal_record(
1193 436890 : &mut self,
1194 436890 : rel: RelTag,
1195 436890 : blknum: BlockNumber,
1196 436890 : rec: NeonWalRecord,
1197 436890 : ) -> anyhow::Result<()> {
1198 436890 : anyhow::ensure!(rel.relnode != 0, RelationError::InvalidRelnode);
1199 436890 : self.put(rel_block_to_key(rel, blknum), Value::WalRecord(rec));
1200 436890 : Ok(())
1201 436890 : }
1202 :
1203 : // Same, but for an SLRU.
1204 24 : pub fn put_slru_wal_record(
1205 24 : &mut self,
1206 24 : kind: SlruKind,
1207 24 : segno: u32,
1208 24 : blknum: BlockNumber,
1209 24 : rec: NeonWalRecord,
1210 24 : ) -> anyhow::Result<()> {
1211 24 : self.put(
1212 24 : slru_block_to_key(kind, segno, blknum),
1213 24 : Value::WalRecord(rec),
1214 24 : );
1215 24 : Ok(())
1216 24 : }
1217 :
1218 : /// Like put_rel_wal_record, but with a ready-made image of the page.
1219 833598 : pub fn put_rel_page_image(
1220 833598 : &mut self,
1221 833598 : rel: RelTag,
1222 833598 : blknum: BlockNumber,
1223 833598 : img: Bytes,
1224 833598 : ) -> anyhow::Result<()> {
1225 833598 : anyhow::ensure!(rel.relnode != 0, RelationError::InvalidRelnode);
1226 833598 : let key = rel_block_to_key(rel, blknum);
1227 833598 : if !key.is_valid_key_on_write_path() {
1228 0 : anyhow::bail!(
1229 0 : "the request contains data not supported by pageserver at {}",
1230 0 : key
1231 0 : );
1232 833598 : }
1233 833598 : self.put(rel_block_to_key(rel, blknum), Value::Image(img));
1234 833598 : Ok(())
1235 833598 : }
1236 :
1237 18 : pub fn put_slru_page_image(
1238 18 : &mut self,
1239 18 : kind: SlruKind,
1240 18 : segno: u32,
1241 18 : blknum: BlockNumber,
1242 18 : img: Bytes,
1243 18 : ) -> anyhow::Result<()> {
1244 18 : let key = slru_block_to_key(kind, segno, blknum);
1245 18 : if !key.is_valid_key_on_write_path() {
1246 0 : anyhow::bail!(
1247 0 : "the request contains data not supported by pageserver at {}",
1248 0 : key
1249 0 : );
1250 18 : }
1251 18 : self.put(key, Value::Image(img));
1252 18 : Ok(())
1253 18 : }
1254 :
1255 8994 : pub(crate) fn put_rel_page_image_zero(
1256 8994 : &mut self,
1257 8994 : rel: RelTag,
1258 8994 : blknum: BlockNumber,
1259 8994 : ) -> anyhow::Result<()> {
1260 8994 : anyhow::ensure!(rel.relnode != 0, RelationError::InvalidRelnode);
1261 8994 : let key = rel_block_to_key(rel, blknum);
1262 8994 : if !key.is_valid_key_on_write_path() {
1263 0 : anyhow::bail!(
1264 0 : "the request contains data not supported by pageserver: {} @ {}",
1265 0 : key,
1266 0 : self.lsn
1267 0 : );
1268 8994 : }
1269 8994 : self.pending_zero_data_pages.insert(key.to_compact());
1270 8994 : self.pending_bytes += ZERO_PAGE.len();
1271 8994 : Ok(())
1272 8994 : }
1273 :
1274 0 : pub(crate) fn put_slru_page_image_zero(
1275 0 : &mut self,
1276 0 : kind: SlruKind,
1277 0 : segno: u32,
1278 0 : blknum: BlockNumber,
1279 0 : ) -> anyhow::Result<()> {
1280 0 : let key = slru_block_to_key(kind, segno, blknum);
1281 0 : if !key.is_valid_key_on_write_path() {
1282 0 : anyhow::bail!(
1283 0 : "the request contains data not supported by pageserver: {} @ {}",
1284 0 : key,
1285 0 : self.lsn
1286 0 : );
1287 0 : }
1288 0 : self.pending_zero_data_pages.insert(key.to_compact());
1289 0 : self.pending_bytes += ZERO_PAGE.len();
1290 0 : Ok(())
1291 0 : }
1292 :
1293 : /// Call this at the end of each WAL record.
1294 437592 : pub(crate) fn on_record_end(&mut self) {
1295 437592 : let pending_zero_data_pages = std::mem::take(&mut self.pending_zero_data_pages);
1296 446586 : for key in pending_zero_data_pages {
1297 8994 : self.put_data(key, Value::Image(ZERO_PAGE.clone()));
1298 8994 : }
1299 437592 : }
1300 :
1301 : /// Store a relmapper file (pg_filenode.map) in the repository
1302 48 : pub async fn put_relmap_file(
1303 48 : &mut self,
1304 48 : spcnode: Oid,
1305 48 : dbnode: Oid,
1306 48 : img: Bytes,
1307 48 : ctx: &RequestContext,
1308 48 : ) -> anyhow::Result<()> {
1309 : // Add it to the directory (if it doesn't exist already)
1310 48 : let buf = self.get(DBDIR_KEY, ctx).await?;
1311 48 : let mut dbdir = DbDirectory::des(&buf)?;
1312 :
1313 48 : let r = dbdir.dbdirs.insert((spcnode, dbnode), true);
1314 48 : if r.is_none() || r == Some(false) {
1315 : // The dbdir entry didn't exist, or it contained a
1316 : // 'false'. The 'insert' call already updated it with
1317 : // 'true', now write the updated 'dbdirs' map back.
1318 48 : let buf = DbDirectory::ser(&dbdir)?;
1319 48 : self.put(DBDIR_KEY, Value::Image(buf.into()));
1320 48 :
1321 48 : // Create AuxFilesDirectory as well
1322 48 : self.init_aux_dir()?;
1323 0 : }
1324 48 : if r.is_none() {
1325 : // Create RelDirectory
1326 24 : let buf = RelDirectory::ser(&RelDirectory {
1327 24 : rels: HashSet::new(),
1328 24 : })?;
1329 24 : self.pending_directory_entries.push((DirectoryKind::Rel, 0));
1330 24 : self.put(
1331 24 : rel_dir_to_key(spcnode, dbnode),
1332 24 : Value::Image(Bytes::from(buf)),
1333 24 : );
1334 24 : }
1335 :
1336 48 : self.put(relmap_file_key(spcnode, dbnode), Value::Image(img));
1337 48 : Ok(())
1338 48 : }
1339 :
1340 0 : pub async fn put_twophase_file(
1341 0 : &mut self,
1342 0 : xid: u64,
1343 0 : img: Bytes,
1344 0 : ctx: &RequestContext,
1345 0 : ) -> anyhow::Result<()> {
1346 : // Add it to the directory entry
1347 0 : let dirbuf = self.get(TWOPHASEDIR_KEY, ctx).await?;
1348 0 : let newdirbuf = if self.tline.pg_version >= 17 {
1349 0 : let mut dir = TwoPhaseDirectoryV17::des(&dirbuf)?;
1350 0 : if !dir.xids.insert(xid) {
1351 0 : anyhow::bail!("twophase file for xid {} already exists", xid);
1352 0 : }
1353 0 : self.pending_directory_entries
1354 0 : .push((DirectoryKind::TwoPhase, dir.xids.len()));
1355 0 : Bytes::from(TwoPhaseDirectoryV17::ser(&dir)?)
1356 : } else {
1357 0 : let xid = xid as u32;
1358 0 : let mut dir = TwoPhaseDirectory::des(&dirbuf)?;
1359 0 : if !dir.xids.insert(xid) {
1360 0 : anyhow::bail!("twophase file for xid {} already exists", xid);
1361 0 : }
1362 0 : self.pending_directory_entries
1363 0 : .push((DirectoryKind::TwoPhase, dir.xids.len()));
1364 0 : Bytes::from(TwoPhaseDirectory::ser(&dir)?)
1365 : };
1366 0 : self.put(TWOPHASEDIR_KEY, Value::Image(newdirbuf));
1367 0 :
1368 0 : self.put(twophase_file_key(xid), Value::Image(img));
1369 0 : Ok(())
1370 0 : }
1371 :
1372 0 : pub async fn set_replorigin(
1373 0 : &mut self,
1374 0 : origin_id: RepOriginId,
1375 0 : origin_lsn: Lsn,
1376 0 : ) -> anyhow::Result<()> {
1377 0 : let key = repl_origin_key(origin_id);
1378 0 : self.put(key, Value::Image(origin_lsn.ser().unwrap().into()));
1379 0 : Ok(())
1380 0 : }
1381 :
1382 0 : pub async fn drop_replorigin(&mut self, origin_id: RepOriginId) -> anyhow::Result<()> {
1383 0 : self.set_replorigin(origin_id, Lsn::INVALID).await
1384 0 : }
1385 :
1386 522 : pub fn put_control_file(&mut self, img: Bytes) -> anyhow::Result<()> {
1387 522 : self.put(CONTROLFILE_KEY, Value::Image(img));
1388 522 : Ok(())
1389 522 : }
1390 :
1391 564 : pub fn put_checkpoint(&mut self, img: Bytes) -> anyhow::Result<()> {
1392 564 : self.put(CHECKPOINT_KEY, Value::Image(img));
1393 564 : Ok(())
1394 564 : }
1395 :
1396 0 : pub async fn drop_dbdir(
1397 0 : &mut self,
1398 0 : spcnode: Oid,
1399 0 : dbnode: Oid,
1400 0 : ctx: &RequestContext,
1401 0 : ) -> anyhow::Result<()> {
1402 0 : let total_blocks = self
1403 0 : .tline
1404 0 : .get_db_size(spcnode, dbnode, Version::Modified(self), ctx)
1405 0 : .await?;
1406 :
1407 : // Remove entry from dbdir
1408 0 : let buf = self.get(DBDIR_KEY, ctx).await?;
1409 0 : let mut dir = DbDirectory::des(&buf)?;
1410 0 : if dir.dbdirs.remove(&(spcnode, dbnode)).is_some() {
1411 0 : let buf = DbDirectory::ser(&dir)?;
1412 0 : self.pending_directory_entries
1413 0 : .push((DirectoryKind::Db, dir.dbdirs.len()));
1414 0 : self.put(DBDIR_KEY, Value::Image(buf.into()));
1415 : } else {
1416 0 : warn!(
1417 0 : "dropped dbdir for spcnode {} dbnode {} did not exist in db directory",
1418 : spcnode, dbnode
1419 : );
1420 : }
1421 :
1422 : // Update logical database size.
1423 0 : self.pending_nblocks -= total_blocks as i64;
1424 0 :
1425 0 : // Delete all relations and metadata files for the spcnode/dnode
1426 0 : self.delete(dbdir_key_range(spcnode, dbnode));
1427 0 : Ok(())
1428 0 : }
1429 :
1430 : /// Create a relation fork.
1431 : ///
1432 : /// 'nblocks' is the initial size.
1433 5760 : pub async fn put_rel_creation(
1434 5760 : &mut self,
1435 5760 : rel: RelTag,
1436 5760 : nblocks: BlockNumber,
1437 5760 : ctx: &RequestContext,
1438 5760 : ) -> Result<(), RelationError> {
1439 5760 : if rel.relnode == 0 {
1440 0 : return Err(RelationError::InvalidRelnode);
1441 5760 : }
1442 : // It's possible that this is the first rel for this db in this
1443 : // tablespace. Create the reldir entry for it if so.
1444 5760 : let mut dbdir = DbDirectory::des(&self.get(DBDIR_KEY, ctx).await.context("read db")?)
1445 5760 : .context("deserialize db")?;
1446 5760 : let rel_dir_key = rel_dir_to_key(rel.spcnode, rel.dbnode);
1447 5760 : let mut rel_dir =
1448 5760 : if let hash_map::Entry::Vacant(e) = dbdir.dbdirs.entry((rel.spcnode, rel.dbnode)) {
1449 : // Didn't exist. Update dbdir
1450 24 : e.insert(false);
1451 24 : let buf = DbDirectory::ser(&dbdir).context("serialize db")?;
1452 24 : self.pending_directory_entries
1453 24 : .push((DirectoryKind::Db, dbdir.dbdirs.len()));
1454 24 : self.put(DBDIR_KEY, Value::Image(buf.into()));
1455 24 :
1456 24 : // and create the RelDirectory
1457 24 : RelDirectory::default()
1458 : } else {
1459 : // reldir already exists, fetch it
1460 5736 : RelDirectory::des(&self.get(rel_dir_key, ctx).await.context("read db")?)
1461 5736 : .context("deserialize db")?
1462 : };
1463 :
1464 : // Add the new relation to the rel directory entry, and write it back
1465 5760 : if !rel_dir.rels.insert((rel.relnode, rel.forknum)) {
1466 0 : return Err(RelationError::AlreadyExists);
1467 5760 : }
1468 5760 :
1469 5760 : self.pending_directory_entries
1470 5760 : .push((DirectoryKind::Rel, rel_dir.rels.len()));
1471 5760 :
1472 5760 : self.put(
1473 5760 : rel_dir_key,
1474 5760 : Value::Image(Bytes::from(
1475 5760 : RelDirectory::ser(&rel_dir).context("serialize")?,
1476 : )),
1477 : );
1478 :
1479 : // Put size
1480 5760 : let size_key = rel_size_to_key(rel);
1481 5760 : let buf = nblocks.to_le_bytes();
1482 5760 : self.put(size_key, Value::Image(Bytes::from(buf.to_vec())));
1483 5760 :
1484 5760 : self.pending_nblocks += nblocks as i64;
1485 5760 :
1486 5760 : // Update relation size cache
1487 5760 : self.tline.set_cached_rel_size(rel, self.lsn, nblocks);
1488 5760 :
1489 5760 : // Even if nblocks > 0, we don't insert any actual blocks here. That's up to the
1490 5760 : // caller.
1491 5760 : Ok(())
1492 5760 : }
1493 :
1494 : /// Truncate relation
1495 18036 : pub async fn put_rel_truncation(
1496 18036 : &mut self,
1497 18036 : rel: RelTag,
1498 18036 : nblocks: BlockNumber,
1499 18036 : ctx: &RequestContext,
1500 18036 : ) -> anyhow::Result<()> {
1501 18036 : anyhow::ensure!(rel.relnode != 0, RelationError::InvalidRelnode);
1502 18036 : if self
1503 18036 : .tline
1504 18036 : .get_rel_exists(rel, Version::Modified(self), ctx)
1505 0 : .await?
1506 : {
1507 18036 : let size_key = rel_size_to_key(rel);
1508 : // Fetch the old size first
1509 18036 : let old_size = self.get(size_key, ctx).await?.get_u32_le();
1510 18036 :
1511 18036 : // Update the entry with the new size.
1512 18036 : let buf = nblocks.to_le_bytes();
1513 18036 : self.put(size_key, Value::Image(Bytes::from(buf.to_vec())));
1514 18036 :
1515 18036 : // Update relation size cache
1516 18036 : self.tline.set_cached_rel_size(rel, self.lsn, nblocks);
1517 18036 :
1521 18036 : // Update logical database size.
1522 18036 : self.pending_nblocks -= old_size as i64 - nblocks as i64;
1523 0 : }
1524 18036 : Ok(())
1525 18036 : }
1526 :
1527 : /// Extend relation
1528 : /// If new size is smaller, do nothing.
1529 830040 : pub async fn put_rel_extend(
1530 830040 : &mut self,
1531 830040 : rel: RelTag,
1532 830040 : nblocks: BlockNumber,
1533 830040 : ctx: &RequestContext,
1534 830040 : ) -> anyhow::Result<()> {
1535 830040 : anyhow::ensure!(rel.relnode != 0, RelationError::InvalidRelnode);
1536 :
1537 : // Put size
1538 830040 : let size_key = rel_size_to_key(rel);
1539 830040 : let old_size = self.get(size_key, ctx).await?.get_u32_le();
1540 830040 :
1541 830040 : // only extend relation here. never decrease the size
1542 830040 : if nblocks > old_size {
1543 824364 : let buf = nblocks.to_le_bytes();
1544 824364 : self.put(size_key, Value::Image(Bytes::from(buf.to_vec())));
1545 824364 :
1546 824364 : // Update relation size cache
1547 824364 : self.tline.set_cached_rel_size(rel, self.lsn, nblocks);
1548 824364 :
1549 824364 : self.pending_nblocks += nblocks as i64 - old_size as i64;
1550 824364 : }
1551 830040 : Ok(())
1552 830040 : }
1553 :
1554 : /// Drop a relation.
1555 6 : pub async fn put_rel_drop(&mut self, rel: RelTag, ctx: &RequestContext) -> anyhow::Result<()> {
1556 6 : anyhow::ensure!(rel.relnode != 0, RelationError::InvalidRelnode);
1557 :
1558 : // Remove it from the directory entry
1559 6 : let dir_key = rel_dir_to_key(rel.spcnode, rel.dbnode);
1560 6 : let buf = self.get(dir_key, ctx).await?;
1561 6 : let mut dir = RelDirectory::des(&buf)?;
1562 :
1563 6 : self.pending_directory_entries
1564 6 : .push((DirectoryKind::Rel, dir.rels.len()));
1565 6 :
1566 6 : if dir.rels.remove(&(rel.relnode, rel.forknum)) {
1567 6 : self.put(dir_key, Value::Image(Bytes::from(RelDirectory::ser(&dir)?)));
1568 : } else {
1569 0 : warn!("dropped rel {} did not exist in rel directory", rel);
1570 : }
1571 :
1572 : // update logical size
1573 6 : let size_key = rel_size_to_key(rel);
1574 6 : let old_size = self.get(size_key, ctx).await?.get_u32_le();
1575 6 : self.pending_nblocks -= old_size as i64;
1576 6 :
1577 6 : // Remove entry from relation size cache
1578 6 : self.tline.remove_cached_rel_size(&rel);
1579 6 :
1580 6 : // Delete size entry, as well as all blocks
1581 6 : self.delete(rel_key_range(rel));
1582 6 :
1583 6 : Ok(())
1584 6 : }
1585 :
1586 18 : pub async fn put_slru_segment_creation(
1587 18 : &mut self,
1588 18 : kind: SlruKind,
1589 18 : segno: u32,
1590 18 : nblocks: BlockNumber,
1591 18 : ctx: &RequestContext,
1592 18 : ) -> anyhow::Result<()> {
1593 18 : // Add it to the directory entry
1594 18 : let dir_key = slru_dir_to_key(kind);
1595 18 : let buf = self.get(dir_key, ctx).await?;
1596 18 : let mut dir = SlruSegmentDirectory::des(&buf)?;
1597 :
1598 18 : if !dir.segments.insert(segno) {
1599 0 : anyhow::bail!("slru segment {kind:?}/{segno} already exists");
1600 18 : }
1601 18 : self.pending_directory_entries
1602 18 : .push((DirectoryKind::SlruSegment(kind), dir.segments.len()));
1603 18 : self.put(
1604 18 : dir_key,
1605 18 : Value::Image(Bytes::from(SlruSegmentDirectory::ser(&dir)?)),
1606 : );
1607 :
1608 : // Put size
1609 18 : let size_key = slru_segment_size_to_key(kind, segno);
1610 18 : let buf = nblocks.to_le_bytes();
1611 18 : self.put(size_key, Value::Image(Bytes::from(buf.to_vec())));
1612 18 :
1613 18 : // even if nblocks > 0, we don't insert any actual blocks here
1614 18 :
1615 18 : Ok(())
1616 18 : }
1617 :
1618 : /// Extend SLRU segment
1619 0 : pub fn put_slru_extend(
1620 0 : &mut self,
1621 0 : kind: SlruKind,
1622 0 : segno: u32,
1623 0 : nblocks: BlockNumber,
1624 0 : ) -> anyhow::Result<()> {
1625 0 : // Put size
1626 0 : let size_key = slru_segment_size_to_key(kind, segno);
1627 0 : let buf = nblocks.to_le_bytes();
1628 0 : self.put(size_key, Value::Image(Bytes::from(buf.to_vec())));
1629 0 : Ok(())
1630 0 : }
1631 :
1632 : /// This method is used for marking truncated SLRU files
1633 0 : pub async fn drop_slru_segment(
1634 0 : &mut self,
1635 0 : kind: SlruKind,
1636 0 : segno: u32,
1637 0 : ctx: &RequestContext,
1638 0 : ) -> anyhow::Result<()> {
1639 0 : // Remove it from the directory entry
1640 0 : let dir_key = slru_dir_to_key(kind);
1641 0 : let buf = self.get(dir_key, ctx).await?;
1642 0 : let mut dir = SlruSegmentDirectory::des(&buf)?;
1643 :
1644 0 : if !dir.segments.remove(&segno) {
1645 0 : warn!("slru segment {:?}/{} does not exist", kind, segno);
1646 0 : }
1647 0 : self.pending_directory_entries
1648 0 : .push((DirectoryKind::SlruSegment(kind), dir.segments.len()));
1649 0 : self.put(
1650 0 : dir_key,
1651 0 : Value::Image(Bytes::from(SlruSegmentDirectory::ser(&dir)?)),
1652 : );
1653 :
1654 : // Delete size entry, as well as all blocks
1655 0 : self.delete(slru_segment_key_range(kind, segno));
1656 0 :
1657 0 : Ok(())
1658 0 : }
1659 :
1660 : /// Drop a relmapper file (pg_filenode.map)
1661 0 : pub fn drop_relmap_file(&mut self, _spcnode: Oid, _dbnode: Oid) -> anyhow::Result<()> {
1662 0 : // TODO
1663 0 : Ok(())
1664 0 : }
1665 :
1666 : /// Remove the twophase state file and its directory entry for the given transaction ID
1667 0 : pub async fn drop_twophase_file(
1668 0 : &mut self,
1669 0 : xid: u64,
1670 0 : ctx: &RequestContext,
1671 0 : ) -> anyhow::Result<()> {
1672 : // Remove it from the directory entry
1673 0 : let buf = self.get(TWOPHASEDIR_KEY, ctx).await?;
1674 0 : let newdirbuf = if self.tline.pg_version >= 17 {
1675 0 : let mut dir = TwoPhaseDirectoryV17::des(&buf)?;
1676 :
1677 0 : if !dir.xids.remove(&xid) {
1678 0 : warn!("twophase file for xid {} does not exist", xid);
1679 0 : }
1680 0 : self.pending_directory_entries
1681 0 : .push((DirectoryKind::TwoPhase, dir.xids.len()));
1682 0 : Bytes::from(TwoPhaseDirectoryV17::ser(&dir)?)
1683 : } else {
1684 0 : let xid: u32 = u32::try_from(xid)?;
1685 0 : let mut dir = TwoPhaseDirectory::des(&buf)?;
1686 :
1687 0 : if !dir.xids.remove(&xid) {
1688 0 : warn!("twophase file for xid {} does not exist", xid);
1689 0 : }
1690 0 : self.pending_directory_entries
1691 0 : .push((DirectoryKind::TwoPhase, dir.xids.len()));
1692 0 : Bytes::from(TwoPhaseDirectory::ser(&dir)?)
1693 : };
1694 0 : self.put(TWOPHASEDIR_KEY, Value::Image(newdirbuf));
1695 0 :
1696 0 : // Delete it
1697 0 : self.delete(twophase_key_range(xid));
1698 0 :
1699 0 : Ok(())
1700 0 : }
1701 :
1702 570 : pub fn init_aux_dir(&mut self) -> anyhow::Result<()> {
1703 570 : if let AuxFilePolicy::V2 = self.tline.get_switch_aux_file_policy() {
1704 558 : return Ok(());
1705 12 : }
1706 12 : let buf = AuxFilesDirectory::ser(&AuxFilesDirectory {
1707 12 : files: HashMap::new(),
1708 12 : })?;
1709 12 : self.pending_directory_entries
1710 12 : .push((DirectoryKind::AuxFiles, 0));
1711 12 : self.put(AUX_FILES_KEY, Value::Image(Bytes::from(buf)));
1712 12 : Ok(())
1713 570 : }
1714 :
1715 90 : pub async fn put_file(
1716 90 : &mut self,
1717 90 : path: &str,
1718 90 : content: &[u8],
1719 90 : ctx: &RequestContext,
1720 90 : ) -> anyhow::Result<()> {
1721 90 : let switch_policy = self.tline.get_switch_aux_file_policy();
1722 :
1723 90 : let policy = {
1724 90 : let current_policy = self.tline.last_aux_file_policy.load();
1725 : // Allowed switch path:
1726 : // Allowed switch paths:
1727 : // * no aux files -> v1/v2/cross-validation
1728 : // * cross-validation -> v2
1729 90 : let current_policy = if current_policy.is_none() {
1730 : // This path will only be hit once per tenant: we will decide the final policy in this code block.
1731 : // The next call to `put_file` will always have `last_aux_file_policy != None`.
1732 36 : let lsn = Lsn::max(self.tline.get_last_record_lsn(), self.lsn);
1733 36 : let aux_files_key_v1 = self.tline.list_aux_files_v1(lsn, ctx).await?;
1734 36 : if aux_files_key_v1.is_empty() {
1735 30 : None
1736 : } else {
1737 6 : warn!("this timeline is using deprecated aux file policy V1 (detected existing v1 files)");
1738 6 : self.tline.do_switch_aux_policy(AuxFilePolicy::V1)?;
1739 6 : Some(AuxFilePolicy::V1)
1740 : }
1741 : } else {
1742 54 : current_policy
1743 : };
1744 :
1745 90 : if AuxFilePolicy::is_valid_migration_path(current_policy, switch_policy) {
1746 36 : self.tline.do_switch_aux_policy(switch_policy)?;
1747 36 : info!(current=?current_policy, next=?switch_policy, "switching aux file policy");
1748 36 : switch_policy
1749 : } else {
1750 : // This branch handles invalid migration paths, and the case where switch_policy == current_policy.
1751 : // In practice, because the migration rules always allow unspecified -> *, this unwrap_or is never hit.
1752 54 : current_policy.unwrap_or(AuxFilePolicy::default_tenant_config())
1753 : }
1754 : };
1755 :
1756 90 : if let AuxFilePolicy::V2 | AuxFilePolicy::CrossValidation = policy {
1757 78 : let key = aux_file::encode_aux_file_key(path);
1758 : // retrieve the key from the engine
1759 78 : let old_val = match self.get(key, ctx).await {
1760 18 : Ok(val) => Some(val),
1761 60 : Err(PageReconstructError::MissingKey(_)) => None,
1762 0 : Err(e) => return Err(e.into()),
1763 : };
1764 78 : let files: Vec<(&str, &[u8])> = if let Some(ref old_val) = old_val {
1765 18 : aux_file::decode_file_value(old_val)?
1766 : } else {
1767 60 : Vec::new()
1768 : };
1769 78 : let mut other_files = Vec::with_capacity(files.len());
1770 78 : let mut modifying_file = None;
1771 96 : for file @ (p, content) in files {
1772 18 : if path == p {
1773 18 : assert!(
1774 18 : modifying_file.is_none(),
1775 0 : "duplicated entries found for {}",
1776 : path
1777 : );
1778 18 : modifying_file = Some(content);
1779 0 : } else {
1780 0 : other_files.push(file);
1781 0 : }
1782 : }
1783 78 : let mut new_files = other_files;
1784 78 : match (modifying_file, content.is_empty()) {
1785 12 : (Some(old_content), false) => {
1786 12 : self.tline
1787 12 : .aux_file_size_estimator
1788 12 : .on_update(old_content.len(), content.len());
1789 12 : new_files.push((path, content));
1790 12 : }
1791 6 : (Some(old_content), true) => {
1792 6 : self.tline
1793 6 : .aux_file_size_estimator
1794 6 : .on_remove(old_content.len());
1795 6 : // not adding the file key to the final `new_files` vec.
1796 6 : }
1797 60 : (None, false) => {
1798 60 : self.tline.aux_file_size_estimator.on_add(content.len());
1799 60 : new_files.push((path, content));
1800 60 : }
1801 0 : (None, true) => warn!("removing non-existing aux file: {}", path),
1802 : }
1803 78 : let new_val = aux_file::encode_file_value(&new_files)?;
1804 78 : self.put(key, Value::Image(new_val.into()));
1805 12 : }
1806 :
1807 90 : if let AuxFilePolicy::V1 | AuxFilePolicy::CrossValidation = policy {
1808 18 : let file_path = path.to_string();
1809 18 : let content = if content.is_empty() {
1810 0 : None
1811 : } else {
1812 18 : Some(Bytes::copy_from_slice(content))
1813 : };
1814 :
1815 : let n_files;
1816 18 : let mut aux_files = self.tline.aux_files.lock().await;
1817 18 : if let Some(mut dir) = aux_files.dir.take() {
1818 : // We already updated aux files in `self`: emit a delta and update our latest value.
1819 0 : dir.upsert(file_path.clone(), content.clone());
1820 0 : n_files = dir.files.len();
1821 0 : if aux_files.n_deltas == MAX_AUX_FILE_DELTAS {
1822 0 : self.put(
1823 0 : AUX_FILES_KEY,
1824 0 : Value::Image(Bytes::from(
1825 0 : AuxFilesDirectory::ser(&dir).context("serialize")?,
1826 : )),
1827 : );
1828 0 : aux_files.n_deltas = 0;
1829 0 : } else {
1830 0 : self.put(
1831 0 : AUX_FILES_KEY,
1832 0 : Value::WalRecord(NeonWalRecord::AuxFile { file_path, content }),
1833 0 : );
1834 0 : aux_files.n_deltas += 1;
1835 0 : }
1836 0 : aux_files.dir = Some(dir);
1837 : } else {
1838 : // Check if the AUX_FILES_KEY is initialized
1839 18 : match self.get(AUX_FILES_KEY, ctx).await {
1840 18 : Ok(dir_bytes) => {
1841 18 : let mut dir = AuxFilesDirectory::des(&dir_bytes)?;
1842 : // Key is already set, we may append a delta
1843 18 : self.put(
1844 18 : AUX_FILES_KEY,
1845 18 : Value::WalRecord(NeonWalRecord::AuxFile {
1846 18 : file_path: file_path.clone(),
1847 18 : content: content.clone(),
1848 18 : }),
1849 18 : );
1850 18 : dir.upsert(file_path, content);
1851 18 : n_files = dir.files.len();
1852 18 : aux_files.dir = Some(dir);
1853 : }
1854 : Err(
1855 0 : e @ (PageReconstructError::Cancelled
1856 0 : | PageReconstructError::AncestorLsnTimeout(_)),
1857 0 : ) => {
1858 0 : // Important that we do not interpret a shutdown error as "not found" and thereby
1859 0 : // reset the map.
1860 0 : return Err(e.into());
1861 : }
1862 : // Note: we added missing key error variant in https://github.com/neondatabase/neon/pull/7393 but
1863 : // the original code assumes all other errors are missing keys. Therefore, we keep the code path
1864 : // the same for now, though in theory, we should only match the `MissingKey` variant.
1865 : Err(
1866 0 : e @ (PageReconstructError::Other(_)
1867 : | PageReconstructError::WalRedo(_)
1868 : | PageReconstructError::MissingKey(_)),
1869 : ) => {
1870 : // Key is missing, we must insert an image as the basis for subsequent deltas.
1871 :
1872 0 : if !matches!(e, PageReconstructError::MissingKey(_)) {
1873 0 : let e = utils::error::report_compact_sources(&e);
1874 0 : tracing::warn!("treating error as if it was a missing key: {}", e);
1875 0 : }
1876 :
1877 0 : let mut dir = AuxFilesDirectory {
1878 0 : files: HashMap::new(),
1879 0 : };
1880 0 : dir.upsert(file_path, content);
1881 0 : self.put(
1882 0 : AUX_FILES_KEY,
1883 0 : Value::Image(Bytes::from(
1884 0 : AuxFilesDirectory::ser(&dir).context("serialize")?,
1885 : )),
1886 : );
1887 0 : n_files = 1;
1888 0 : aux_files.dir = Some(dir);
1889 : }
1890 : }
1891 : }
1892 :
1893 18 : self.pending_directory_entries
1894 18 : .push((DirectoryKind::AuxFiles, n_files));
1895 72 : }
1896 :
1897 90 : Ok(())
1898 90 : }
1899 :
1900 : ///
1901 : /// Flush changes accumulated so far to the underlying repository.
1902 : ///
1903 : /// Usually, changes made in DatadirModification are atomic, but this allows
1904 : /// you to flush them to the underlying repository before the final `commit`.
1905 : /// That allows freeing up the memory used to hold the pending changes.
1906 : ///
1907 : /// Currently only used during bulk import of a data directory. In that
1908 : /// context, breaking the atomicity is OK. If the import is interrupted, the
1909 : /// whole import fails and the timeline will be deleted anyway.
1910 : /// (Or to be precise, it will be left behind for debugging purposes and
1911 : /// ignored, see <https://github.com/neondatabase/neon/pull/1809>)
1912 : ///
1913 : /// Note: A consequence of flushing the pending operations is that they
1914 : /// won't be visible to subsequent operations until `commit`. The function
1915 : /// retains all the metadata, but data pages are flushed. That's again OK
1916 : /// for bulk import, where you are just loading data pages and won't try to
1917 : /// modify the same pages twice.
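///
/// A minimal usage sketch (not a compilable doc-test; `import_lsn`, `chunks`, the per-chunk
/// `put_*` calls, and `ctx` are placeholders for whatever the bulk-import code has in scope):
///
/// ```ignore
/// let mut modification = tline.begin_modification(import_lsn);
/// for chunk in chunks {
///     // ... put_* calls for this chunk ...
///     // Flush buffered data pages to the timeline; metadata stays pending.
///     modification.flush(ctx).await?;
/// }
/// // The final commit writes the retained metadata and makes everything visible.
/// modification.commit(ctx).await?;
/// ```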
1918 5790 : pub(crate) async fn flush(&mut self, ctx: &RequestContext) -> anyhow::Result<()> {
1919 5790 : // Unless we have accumulated a decent amount of changes, it's not worth it
1920 5790 : // to scan through the pending_updates list.
1921 5790 : let pending_nblocks = self.pending_nblocks;
1922 5790 : if pending_nblocks < 10000 {
1923 5790 : return Ok(());
1924 0 : }
1925 :
1926 0 : let mut writer = self.tline.writer().await;
1927 :
1928 : // Flush relation and SLRU data blocks, keep metadata.
1929 0 : let pending_data_pages = std::mem::take(&mut self.pending_data_pages);
1930 0 :
1931 0 : // This bails out on first error without modifying pending_updates.
1932 0 : // That's Ok, cf this function's doc comment.
1933 0 : writer.put_batch(pending_data_pages, ctx).await?;
1934 0 : self.pending_bytes = 0;
1935 0 :
1936 0 : if pending_nblocks != 0 {
1937 0 : writer.update_current_logical_size(pending_nblocks * i64::from(BLCKSZ));
1938 0 : self.pending_nblocks = 0;
1939 0 : }
1940 :
1941 0 : for (kind, count) in std::mem::take(&mut self.pending_directory_entries) {
1942 0 : writer.update_directory_entries_count(kind, count as u64);
1943 0 : }
1944 :
1945 0 : Ok(())
1946 5790 : }
1947 :
1948 : ///
1949 : /// Finish this atomic update, writing all the updated keys to the
1950 : /// underlying timeline.
1951 : /// All the modifications in this atomic update are stamped by the specified LSN.
1952 : ///
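/// A sketch of the typical call pattern, mirroring the `aux_files_round_trip` test at the
/// bottom of this file (paths and contents are just example values):
///
/// ```ignore
/// let mut modification = tline.begin_modification(Lsn(0x1000));
/// modification.put_file("foo/bar1", b"content1", &ctx).await?;
/// // Advance the LSN so the second write is stamped separately within the same batch.
/// modification.set_lsn(Lsn(0x1008))?;
/// modification.put_file("foo/bar2", b"content2", &ctx).await?;
/// modification.commit(&ctx).await?;
/// ```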
1953 2229228 : pub async fn commit(&mut self, ctx: &RequestContext) -> anyhow::Result<()> {
1954 2229228 : // Commit should never be called mid-wal-record
1955 2229228 : assert!(self.pending_zero_data_pages.is_empty());
1956 :
1957 2229228 : let mut writer = self.tline.writer().await;
1958 :
1959 2229228 : let pending_nblocks = self.pending_nblocks;
1960 2229228 : self.pending_nblocks = 0;
1961 2229228 :
1962 2229228 : // Ordering: the items in this batch do not need to be in any global order, but values for
1963 2229228 : // a particular Key must be in Lsn order relative to one another. InMemoryLayer relies on
1964 2229228 : // this to do efficient updates to its index.
1965 2229228 : let mut write_batch = std::mem::take(&mut self.pending_data_pages);
1966 2229228 :
1967 2229228 : write_batch.extend(
1968 2229228 : self.pending_metadata_pages
1969 2229228 : .drain()
1970 2229228 : .flat_map(|(key, values)| {
1971 821460 : values
1972 821460 : .into_iter()
1973 821460 : .map(move |(lsn, value_size, value)| (key, lsn, value_size, value))
1974 2229228 : }),
1975 2229228 : );
1976 2229228 :
1977 2229228 : if !write_batch.is_empty() {
1978 1242180 : writer.put_batch(write_batch, ctx).await?;
1979 987048 : }
1980 :
1981 2229228 : if !self.pending_deletions.is_empty() {
1982 6 : writer.delete_batch(&self.pending_deletions, ctx).await?;
1983 6 : self.pending_deletions.clear();
1984 2229222 : }
1985 :
1986 2229228 : self.pending_lsns.push(self.lsn);
1987 2666802 : for pending_lsn in self.pending_lsns.drain(..) {
1988 2666802 : // Ideally, we should be able to call writer.finish_write() only once
1989 2666802 : // with the highest LSN. However, the last_record_lsn variable in the
1990 2666802 : // timeline keeps track of the latest LSN and the immediate previous LSN
1991 2666802 : // so we need to record every LSN to not leave a gap between them.
1992 2666802 : writer.finish_write(pending_lsn);
1993 2666802 : }
1994 :
1995 2229228 : if pending_nblocks != 0 {
1996 811710 : writer.update_current_logical_size(pending_nblocks * i64::from(BLCKSZ));
1997 1417518 : }
1998 :
1999 2229228 : for (kind, count) in std::mem::take(&mut self.pending_directory_entries) {
2000 8472 : writer.update_directory_entries_count(kind, count as u64);
2001 8472 : }
2002 :
2003 2229228 : self.pending_bytes = 0;
2004 2229228 :
2005 2229228 : Ok(())
2006 2229228 : }
2007 :
2008 875112 : pub(crate) fn len(&self) -> usize {
2009 875112 : self.pending_metadata_pages.len()
2010 875112 : + self.pending_data_pages.len()
2011 875112 : + self.pending_deletions.len()
2012 875112 : }
2013 :
2014 : /// Read a page from the Timeline we are writing to. For metadata pages, this passes through
2015 : /// a cache in Self, which makes writes earlier in this modification visible to WAL records later
2016 : /// in the modification.
2017 : ///
2018 : /// For data pages, reads pass directly to the owning Timeline: any ingest code which reads a data
2019 : /// page must ensure that the pages they read are already committed in Timeline, for example
2020 : /// DB create operations are always preceded by a call to commit(). This is special cased because
2021 : /// it's rare: all the 'normal' WAL operations will only read metadata pages such as relation sizes,
2022 : /// and not data pages.
2023 859806 : async fn get(&self, key: Key, ctx: &RequestContext) -> Result<Bytes, PageReconstructError> {
2024 859806 : if !Self::is_data_key(&key) {
2025 : // Have we already updated the same key? Read the latest pending updated
2026 : // version in that case.
2027 : //
2028 : // Note: we don't check pending_deletions. It is an error to request a
2029 : // value that has been removed, deletion only avoids leaking storage.
2030 859806 : if let Some(values) = self.pending_metadata_pages.get(&key.to_compact()) {
2031 47784 : if let Some((_, _, value)) = values.last() {
2032 47784 : return if let Value::Image(img) = value {
2033 47784 : Ok(img.clone())
2034 : } else {
2035 : // Currently, we never need to read back a WAL record that we
2036 : // inserted in the same "transaction". All the metadata updates
2037 : // work directly with Images, and we never need to read actual
2038 : // data pages. We could handle this if we had to, by calling
2039 : // the walredo manager, but let's keep it simple for now.
2040 0 : Err(PageReconstructError::Other(anyhow::anyhow!(
2041 0 : "unexpected pending WAL record"
2042 0 : )))
2043 : };
2044 0 : }
2045 812022 : }
2046 : } else {
2047 : // This is an expensive check, so we only do it in debug mode. If reading a data key,
2048 : // this key should never be present in pending_data_pages. We ensure this by committing
2049 : // modifications before ingesting DB create operations, which are the only kind that reads
2050 : // data pages during ingest.
2051 0 : if cfg!(debug_assertions) {
2052 0 : for (dirty_key, _, _, _) in &self.pending_data_pages {
2053 0 : debug_assert!(&key.to_compact() != dirty_key);
2054 : }
2055 :
2056 0 : debug_assert!(!self.pending_zero_data_pages.contains(&key.to_compact()))
2057 0 : }
2058 : }
2059 :
2060 : // Metadata page cache miss, or we're reading a data page.
2061 812022 : let lsn = Lsn::max(self.tline.get_last_record_lsn(), self.lsn);
2062 812022 : self.tline.get(key, lsn, ctx).await
2063 859806 : }
2064 :
2065 : /// Only used during unit tests, force putting a key into the modification.
2066 : #[cfg(test)]
2067 6 : pub(crate) fn put_for_test(&mut self, key: Key, val: Value) {
2068 6 : self.put(key, val);
2069 6 : }
2070 :
2071 2128446 : fn put(&mut self, key: Key, val: Value) {
2072 2128446 : if Self::is_data_key(&key) {
2073 1270530 : self.put_data(key.to_compact(), val)
2074 : } else {
2075 857916 : self.put_metadata(key.to_compact(), val)
2076 : }
2077 2128446 : }
2078 :
2079 1279524 : fn put_data(&mut self, key: CompactKey, val: Value) {
2080 1279524 : let val_serialized_size = val.serialized_size().unwrap() as usize;
2081 1279524 :
2082 1279524 : // If this page was previously zero'd in the same WalRecord, then drop the previous zero page write. This
2083 1279524 : // is an optimization that avoids persisting both the zero page generated by us (e.g. during a relation extend),
2084 1279524 : // and the subsequent postgres-originating write
2085 1279524 : if self.pending_zero_data_pages.remove(&key) {
2086 0 : self.pending_bytes -= ZERO_PAGE.len();
2087 1279524 : }
2088 :
2089 1279524 : self.pending_bytes += val_serialized_size;
2090 1279524 : self.pending_data_pages
2091 1279524 : .push((key, self.lsn, val_serialized_size, val))
2092 1279524 : }
2093 :
2094 857916 : fn put_metadata(&mut self, key: CompactKey, val: Value) {
2095 857916 : let values = self.pending_metadata_pages.entry(key).or_default();
2096 : // Replace the previous value if it exists at the same lsn
2097 857916 : if let Some((last_lsn, last_value_ser_size, last_value)) = values.last_mut() {
2098 36456 : if *last_lsn == self.lsn {
2099 : // Update the pending_bytes contribution from this entry, and update the serialized size in place
2100 36456 : self.pending_bytes -= *last_value_ser_size;
2101 36456 : *last_value_ser_size = val.serialized_size().unwrap() as usize;
2102 36456 : self.pending_bytes += *last_value_ser_size;
2103 36456 :
2104 36456 : // Use the latest value; this replaces any earlier write to the same (key, lsn), such as may
2105 36456 : // have been generated by synthesized zero page writes prior to the first real write to a page.
2106 36456 : *last_value = val;
2107 36456 : return;
2108 0 : }
2109 821460 : }
2110 :
2111 821460 : let val_serialized_size = val.serialized_size().unwrap() as usize;
2112 821460 : self.pending_bytes += val_serialized_size;
2113 821460 : values.push((self.lsn, val_serialized_size, val));
2114 857916 : }
2115 :
2116 6 : fn delete(&mut self, key_range: Range<Key>) {
2117 6 : trace!("DELETE {}-{}", key_range.start, key_range.end);
2118 6 : self.pending_deletions.push((key_range, self.lsn));
2119 6 : }
2120 : }
2121 :
2122 : /// This enum facilitates accessing either a committed key from the timeline at a
2123 : /// specific LSN, or the latest uncommitted key from a pending modification.
2124 : ///
2125 : /// During WAL ingestion, the records from multiple LSNs may be batched in the same
2126 : /// modification before being flushed to the timeline. Hence, the routines in WalIngest
2127 : /// need to look up the keys in the modification first before looking them up in the
2128 : /// timeline to not miss the latest updates.
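///
/// For example (a sketch, not a doc-test; `get_rel_exists` is one of the read paths that
/// takes a `Version`, and `rel`, `lsn`, and `ctx` stand for whatever the caller has in scope):
///
/// ```ignore
/// // Inside a DatadirModification: pending, uncommitted writes are visible too.
/// let exists = self.tline.get_rel_exists(rel, Version::Modified(self), ctx).await?;
/// // Against committed state only, at a fixed LSN:
/// let exists = timeline.get_rel_exists(rel, Version::Lsn(lsn), ctx).await?;
/// ```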
2129 : #[derive(Clone, Copy)]
2130 : pub enum Version<'a> {
2131 : Lsn(Lsn),
2132 : Modified(&'a DatadirModification<'a>),
2133 : }
2134 :
2135 : impl<'a> Version<'a> {
2136 70680 : async fn get(
2137 70680 : &self,
2138 70680 : timeline: &Timeline,
2139 70680 : key: Key,
2140 70680 : ctx: &RequestContext,
2141 70680 : ) -> Result<Bytes, PageReconstructError> {
2142 70680 : match self {
2143 70620 : Version::Lsn(lsn) => timeline.get(key, *lsn, ctx).await,
2144 60 : Version::Modified(modification) => modification.get(key, ctx).await,
2145 : }
2146 70680 : }
2147 :
2148 106860 : fn get_lsn(&self) -> Lsn {
2149 106860 : match self {
2150 88722 : Version::Lsn(lsn) => *lsn,
2151 18138 : Version::Modified(modification) => modification.lsn,
2152 : }
2153 106860 : }
2154 : }
2155 :
2156 : //--- Metadata structs stored in key-value pairs in the repository.
2157 :
2158 6726 : #[derive(Debug, Serialize, Deserialize)]
2159 : struct DbDirectory {
2160 : // (spcnode, dbnode) -> (do relmapper and PG_VERSION files exist)
2161 : dbdirs: HashMap<(Oid, Oid), bool>,
2162 : }
2163 :
2164 : // The format of TwoPhaseDirectory changed in PostgreSQL v17, because the filenames of
2165 : // pg_twophase files were expanded from 32-bit XIDs to 64-bit XIDs. Previously, the files
2166 : // were named like "pg_twophase/000002E5"; now they're like
2167 : // "pg_twophase/0000000A000002E4".
2168 :
2169 870 : #[derive(Debug, Serialize, Deserialize)]
2170 : struct TwoPhaseDirectory {
2171 : xids: HashSet<TransactionId>,
2172 : }
2173 :
2174 0 : #[derive(Debug, Serialize, Deserialize)]
2175 : struct TwoPhaseDirectoryV17 {
2176 : xids: HashSet<u64>,
2177 : }
2178 :
2179 5796 : #[derive(Debug, Serialize, Deserialize, Default)]
2180 : struct RelDirectory {
2181 : // Set of relations that exist. (relfilenode, forknum)
2182 : //
2183 : // TODO: Store it as a btree or radix tree or something else that spans multiple
2184 : // key-value pairs, if you have a lot of relations
2185 : rels: HashSet<(Oid, u8)>,
2186 : }
2187 :
2188 84 : #[derive(Debug, Serialize, Deserialize, Default, PartialEq)]
2189 : pub(crate) struct AuxFilesDirectory {
2190 : pub(crate) files: HashMap<String, Bytes>,
2191 : }
2192 :
2193 : impl AuxFilesDirectory {
2194 48 : pub(crate) fn upsert(&mut self, key: String, value: Option<Bytes>) {
2195 48 : if let Some(value) = value {
2196 42 : self.files.insert(key, value);
2197 42 : } else {
2198 6 : self.files.remove(&key);
2199 6 : }
2200 48 : }
2201 : }
2202 :
2203 0 : #[derive(Debug, Serialize, Deserialize)]
2204 : struct RelSizeEntry {
2205 : nblocks: u32,
2206 : }
2207 :
2208 2610 : #[derive(Debug, Serialize, Deserialize, Default)]
2209 : struct SlruSegmentDirectory {
2210 : // Set of SLRU segments that exist.
2211 : segments: HashSet<u32>,
2212 : }
2213 :
2214 : #[derive(Copy, Clone, PartialEq, Eq, Debug, enum_map::Enum)]
2215 : #[repr(u8)]
2216 : pub(crate) enum DirectoryKind {
2217 : Db,
2218 : TwoPhase,
2219 : Rel,
2220 : AuxFiles,
2221 : SlruSegment(SlruKind),
2222 : }
2223 :
2224 : impl DirectoryKind {
2225 : pub(crate) const KINDS_NUM: usize = <DirectoryKind as Enum>::LENGTH;
2226 16944 : pub(crate) fn offset(&self) -> usize {
2227 16944 : self.into_usize()
2228 16944 : }
2229 : }
2230 :
2231 : static ZERO_PAGE: Bytes = Bytes::from_static(&[0u8; BLCKSZ as usize]);
2232 :
2233 : #[allow(clippy::bool_assert_comparison)]
2234 : #[cfg(test)]
2235 : mod tests {
2236 : use hex_literal::hex;
2237 : use utils::id::TimelineId;
2238 :
2239 : use super::*;
2240 :
2241 : use crate::{tenant::harness::TenantHarness, DEFAULT_PG_VERSION};
2242 :
2243 : /// Test a round trip of aux file updates, from DatadirModification to reading back from the Timeline
2244 : #[tokio::test]
2245 6 : async fn aux_files_round_trip() -> anyhow::Result<()> {
2246 6 : let name = "aux_files_round_trip";
2247 6 : let harness = TenantHarness::create(name).await?;
2248 6 :
2249 6 : pub const TIMELINE_ID: TimelineId =
2250 6 : TimelineId::from_array(hex!("11223344556677881122334455667788"));
2251 6 :
2252 24 : let (tenant, ctx) = harness.load().await;
2253 6 : let tline = tenant
2254 6 : .create_empty_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
2255 6 : .await?;
2256 6 : let tline = tline.raw_timeline().unwrap();
2257 6 :
2258 6 : // First modification: insert two keys
2259 6 : let mut modification = tline.begin_modification(Lsn(0x1000));
2260 6 : modification.put_file("foo/bar1", b"content1", &ctx).await?;
2261 6 : modification.set_lsn(Lsn(0x1008))?;
2262 6 : modification.put_file("foo/bar2", b"content2", &ctx).await?;
2263 6 : modification.commit(&ctx).await?;
2264 6 : let expect_1008 = HashMap::from([
2265 6 : ("foo/bar1".to_string(), Bytes::from_static(b"content1")),
2266 6 : ("foo/bar2".to_string(), Bytes::from_static(b"content2")),
2267 6 : ]);
2268 6 :
2269 6 : let readback = tline.list_aux_files(Lsn(0x1008), &ctx).await?;
2270 6 : assert_eq!(readback, expect_1008);
2271 6 :
2272 6 : // Second modification: update one key, remove the other
2273 6 : let mut modification = tline.begin_modification(Lsn(0x2000));
2274 6 : modification.put_file("foo/bar1", b"content3", &ctx).await?;
2275 6 : modification.set_lsn(Lsn(0x2008))?;
2276 6 : modification.put_file("foo/bar2", b"", &ctx).await?;
2277 6 : modification.commit(&ctx).await?;
2278 6 : let expect_2008 =
2279 6 : HashMap::from([("foo/bar1".to_string(), Bytes::from_static(b"content3"))]);
2280 6 :
2281 6 : let readback = tline.list_aux_files(Lsn(0x2008), &ctx).await?;
2282 6 : assert_eq!(readback, expect_2008);
2283 6 :
2284 6 : // Reading back in time works
2285 6 : let readback = tline.list_aux_files(Lsn(0x1008), &ctx).await?;
2286 6 : assert_eq!(readback, expect_1008);
2287 6 :
2288 6 : Ok(())
2289 6 : }
2290 :
2291 : /*
2292 : fn assert_current_logical_size<R: Repository>(timeline: &DatadirTimeline<R>, lsn: Lsn) {
2293 : let incremental = timeline.get_current_logical_size();
2294 : let non_incremental = timeline
2295 : .get_current_logical_size_non_incremental(lsn)
2296 : .unwrap();
2297 : assert_eq!(incremental, non_incremental);
2298 : }
2299 : */
2300 :
2301 : /*
2302 : ///
2303 : /// Test list_rels() function, with branches and dropped relations
2304 : ///
2305 : #[test]
2306 : fn test_list_rels_drop() -> Result<()> {
2307 : let repo = RepoHarness::create("test_list_rels_drop")?.load();
2308 : let tline = create_empty_timeline(repo, TIMELINE_ID)?;
2309 : const TESTDB: u32 = 111;
2310 :
2311 : // Import initial dummy checkpoint record, otherwise the get_timeline() call
2312 : // after branching fails below
2313 : let mut writer = tline.begin_record(Lsn(0x10));
2314 : writer.put_checkpoint(ZERO_CHECKPOINT.clone())?;
2315 : writer.finish()?;
2316 :
2317 : // Create a relation on the timeline
2318 : let mut writer = tline.begin_record(Lsn(0x20));
2319 : writer.put_rel_page_image(TESTREL_A, 0, TEST_IMG("foo blk 0 at 2"))?;
2320 : writer.finish()?;
2321 :
2322 : let writer = tline.begin_record(Lsn(0x00));
2323 : writer.finish()?;
2324 :
2325 : // Check that list_rels() lists it after LSN 2, but no before it
2326 : assert!(!tline.list_rels(0, TESTDB, Lsn(0x10))?.contains(&TESTREL_A));
2327 : assert!(tline.list_rels(0, TESTDB, Lsn(0x20))?.contains(&TESTREL_A));
2328 : assert!(tline.list_rels(0, TESTDB, Lsn(0x30))?.contains(&TESTREL_A));
2329 :
2330 : // Create a branch, check that the relation is visible there
2331 : repo.branch_timeline(&tline, NEW_TIMELINE_ID, Lsn(0x30))?;
2332 : let newtline = match repo.get_timeline(NEW_TIMELINE_ID)?.local_timeline() {
2333 : Some(timeline) => timeline,
2334 : None => panic!("Should have a local timeline"),
2335 : };
2336 : let newtline = DatadirTimelineImpl::new(newtline);
2337 : assert!(newtline
2338 : .list_rels(0, TESTDB, Lsn(0x30))?
2339 : .contains(&TESTREL_A));
2340 :
2341 : // Drop it on the branch
2342 : let mut new_writer = newtline.begin_record(Lsn(0x40));
2343 : new_writer.drop_relation(TESTREL_A)?;
2344 : new_writer.finish()?;
2345 :
2346 : // Check that it's no longer listed on the branch after the point where it was dropped
2347 : assert!(newtline
2348 : .list_rels(0, TESTDB, Lsn(0x30))?
2349 : .contains(&TESTREL_A));
2350 : assert!(!newtline
2351 : .list_rels(0, TESTDB, Lsn(0x40))?
2352 : .contains(&TESTREL_A));
2353 :
2354 : // Run checkpoint and garbage collection and check that it's still not visible
2355 : newtline.checkpoint(CheckpointConfig::Forced)?;
2356 : repo.gc_iteration(Some(NEW_TIMELINE_ID), 0, true)?;
2357 :
2358 : assert!(!newtline
2359 : .list_rels(0, TESTDB, Lsn(0x40))?
2360 : .contains(&TESTREL_A));
2361 :
2362 : Ok(())
2363 : }
2364 : */
2365 :
2366 : /*
2367 : #[test]
2368 : fn test_read_beyond_eof() -> Result<()> {
2369 : let repo = RepoHarness::create("test_read_beyond_eof")?.load();
2370 : let tline = create_test_timeline(repo, TIMELINE_ID)?;
2371 :
2372 : make_some_layers(&tline, Lsn(0x20))?;
2373 : let mut writer = tline.begin_record(Lsn(0x60));
2374 : walingest.put_rel_page_image(
2375 : &mut writer,
2376 : TESTREL_A,
2377 : 0,
2378 : TEST_IMG(&format!("foo blk 0 at {}", Lsn(0x60))),
2379 : )?;
2380 : writer.finish()?;
2381 :
2382 : // Test read before rel creation. Should error out.
2383 : assert!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x10), false).is_err());
2384 :
2385 : // Read block beyond end of relation at different points in time.
2386 : // These reads should fall into different delta, image, and in-memory layers.
2387 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x20), false)?, ZERO_PAGE);
2388 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x25), false)?, ZERO_PAGE);
2389 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x30), false)?, ZERO_PAGE);
2390 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x35), false)?, ZERO_PAGE);
2391 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x40), false)?, ZERO_PAGE);
2392 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x45), false)?, ZERO_PAGE);
2393 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x50), false)?, ZERO_PAGE);
2394 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x55), false)?, ZERO_PAGE);
2395 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x60), false)?, ZERO_PAGE);
2396 :
2397 : // Test on an in-memory layer with no preceding layer
2398 : let mut writer = tline.begin_record(Lsn(0x70));
2399 : walingest.put_rel_page_image(
2400 : &mut writer,
2401 : TESTREL_B,
2402 : 0,
2403 : TEST_IMG(&format!("foo blk 0 at {}", Lsn(0x70))),
2404 : )?;
2405 : writer.finish()?;
2406 :
2407 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_B, 1, Lsn(0x70), false)?, ZERO_PAGE);
2408 :
2409 : Ok(())
2410 : }
2411 : */
2412 : }
|