Line data Source code
1 : //!
2 : //! This provides an abstraction to store PostgreSQL relations and other files
3 : //! in the key-value store that implements the Repository interface.
4 : //!
5 : //! (TODO: The line between PUT-functions here and walingest.rs is a bit blurry, as
6 : //! walingest.rs handles a few things like implicit relation creation and extension.
7 : //! Clarify that)
8 : //!
9 : use super::tenant::{PageReconstructError, Timeline};
10 : use crate::context::RequestContext;
11 : use crate::keyspace::{KeySpace, KeySpaceAccum};
12 : use crate::span::debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id;
13 : use crate::walrecord::NeonWalRecord;
14 : use crate::{aux_file, repository::*};
15 : use anyhow::{ensure, Context};
16 : use bytes::{Buf, Bytes, BytesMut};
17 : use enum_map::Enum;
18 : use pageserver_api::key::{
19 : dbdir_key_range, rel_block_to_key, rel_dir_to_key, rel_key_range, rel_size_to_key,
20 : relmap_file_key, repl_origin_key, repl_origin_key_range, slru_block_to_key, slru_dir_to_key,
21 : slru_segment_key_range, slru_segment_size_to_key, twophase_file_key, twophase_key_range,
22 : CompactKey, AUX_FILES_KEY, CHECKPOINT_KEY, CONTROLFILE_KEY, DBDIR_KEY, TWOPHASEDIR_KEY,
23 : };
24 : use pageserver_api::keyspace::SparseKeySpace;
25 : use pageserver_api::models::AuxFilePolicy;
26 : use pageserver_api::reltag::{BlockNumber, RelTag, SlruKind};
27 : use postgres_ffi::relfile_utils::{FSM_FORKNUM, VISIBILITYMAP_FORKNUM};
28 : use postgres_ffi::BLCKSZ;
29 : use postgres_ffi::{Oid, RepOriginId, TimestampTz, TransactionId};
30 : use serde::{Deserialize, Serialize};
31 : use std::collections::{hash_map, HashMap, HashSet};
32 : use std::ops::ControlFlow;
33 : use std::ops::Range;
34 : use strum::IntoEnumIterator;
35 : use tokio_util::sync::CancellationToken;
36 : use tracing::{debug, info, trace, warn};
37 : use utils::bin_ser::DeserializeError;
38 : use utils::pausable_failpoint;
39 : use utils::{bin_ser::BeSer, lsn::Lsn};
40 :
41 : /// Max delta records appended to the AUX_FILES_KEY (for aux v1). The write path will write a full image once this threshold is reached.
42 : pub const MAX_AUX_FILE_DELTAS: usize = 1024;
43 :
44 : /// Max number of aux-file-related delta layers. The compaction will create a new image layer once this threshold is reached.
45 : pub const MAX_AUX_FILE_V2_DELTAS: usize = 64;
46 :
47 : #[derive(Debug)]
48 : pub enum LsnForTimestamp {
49 : /// Found commits both before and after the given timestamp
50 : Present(Lsn),
51 :
52 : /// Found no commits after the given timestamp; this means
53 : /// that the newest data in the branch is older than the given
54 : /// timestamp.
55 : ///
56 : /// All commits <= LSN happened before the given timestamp
57 : Future(Lsn),
58 :
59 : /// The queried timestamp is older than the horizon we can look back to (the PITR cutoff)
60 : ///
61 : /// All commits > LSN happened after the given timestamp,
62 : /// but any commits < LSN might have happened before or after
63 : /// the given timestamp. We don't know because no data before
64 : /// the given lsn is available.
65 : Past(Lsn),
66 :
67 : /// We have found no commit with a timestamp,
68 : /// so we can't return anything meaningful.
69 : ///
70 : /// The associated LSN is the lower bound value we can safely
71 : /// create branches on, but no statement is made if it is
72 : /// older or newer than the timestamp.
73 : ///
74 : /// This variant can e.g. be returned right after a
75 : /// cluster import.
76 : NoData(Lsn),
77 : }
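// Editor's sketch of how a caller might act on these variants (illustrative
// only; see find_lsn_for_timestamp below for how they are produced):
//
//     match timeline.find_lsn_for_timestamp(ts, &cancel, ctx).await? {
//         LsnForTimestamp::Present(lsn) => lsn, // commits on both sides; branch here
//         LsnForTimestamp::Future(lsn) => lsn,  // branch is older than `ts`; lsn is an upper bound
//         LsnForTimestamp::Past(_lsn) => anyhow::bail!("timestamp predates the PITR horizon"),
//         LsnForTimestamp::NoData(lsn) => lsn,  // no commit timestamps; lsn is only a safe lower bound
//     }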
78 :
79 0 : #[derive(Debug, thiserror::Error)]
80 : pub(crate) enum CalculateLogicalSizeError {
81 : #[error("cancelled")]
82 : Cancelled,
83 :
84 : /// Something went wrong while reading the metadata we use to calculate logical size.
85 : /// Note that cancellation variants of `PageReconstructError` are transformed to [`Self::Cancelled`]
86 : /// in the `From` implementation for this variant.
87 : #[error(transparent)]
88 : PageRead(PageReconstructError),
89 :
90 : /// Something went wrong deserializing metadata that we read to calculate logical size
91 : #[error("decode error: {0}")]
92 : Decode(#[from] DeserializeError),
93 : }
94 :
95 0 : #[derive(Debug, thiserror::Error)]
96 : pub(crate) enum CollectKeySpaceError {
97 : #[error(transparent)]
98 : Decode(#[from] DeserializeError),
99 : #[error(transparent)]
100 : PageRead(PageReconstructError),
101 : #[error("cancelled")]
102 : Cancelled,
103 : }
104 :
105 : impl From<PageReconstructError> for CollectKeySpaceError {
106 0 : fn from(err: PageReconstructError) -> Self {
107 0 : match err {
108 0 : PageReconstructError::Cancelled => Self::Cancelled,
109 0 : err => Self::PageRead(err),
110 : }
111 0 : }
112 : }
113 :
114 : impl From<PageReconstructError> for CalculateLogicalSizeError {
115 0 : fn from(pre: PageReconstructError) -> Self {
116 0 : match pre {
117 0 : PageReconstructError::Cancelled => Self::Cancelled,
118 0 : _ => Self::PageRead(pre),
119 : }
120 0 : }
121 : }
122 :
123 0 : #[derive(Debug, thiserror::Error)]
124 : pub enum RelationError {
125 : #[error("Relation Already Exists")]
126 : AlreadyExists,
127 : #[error("invalid relnode")]
128 : InvalidRelnode,
129 : #[error(transparent)]
130 : Other(#[from] anyhow::Error),
131 : }
132 :
133 : ///
134 : /// This impl provides all the functionality to store PostgreSQL relations, SLRUs,
135 : /// and other special kinds of files, in a versioned key-value store. The
136 : /// Timeline struct provides the key-value store.
137 : ///
138 : /// This is a separate impl, so that we can easily include all these functions in a Timeline
139 : /// implementation; it might be moved into a separate struct later.
140 : impl Timeline {
141 : /// Start ingesting a WAL record, or other atomic modification of
142 : /// the timeline.
143 : ///
144 : /// This provides a transaction-like interface to perform a bunch
145 : /// of modifications atomically.
146 : ///
147 : /// To ingest a WAL record, call begin_modification(lsn) to get a
148 : /// DatadirModification object. Use the functions in the object to
149 : /// modify the repository state, updating all the pages and metadata
150 : /// that the WAL record affects. When you're done, call commit() to
151 : /// commit the changes.
152 : ///
153 : /// Lsn stored in modification is advanced by `ingest_record` and
154 : /// is used by `commit()` to update `last_record_lsn`.
155 : ///
156 : /// Calling commit() will flush all the changes and reset the state,
157 : /// so the `DatadirModification` struct can be reused to perform the next modification.
158 : ///
159 : /// Note that any pending modifications you make through the
160 : /// modification object won't be visible to calls to the 'get' and list
161 : /// functions of the timeline until you finish! And if you update the
162 : /// same page twice, the last update wins.
163 : ///
164 805176 : pub fn begin_modification(&self, lsn: Lsn) -> DatadirModification
165 805176 : where
166 805176 : Self: Sized,
167 805176 : {
168 805176 : DatadirModification {
169 805176 : tline: self,
170 805176 : pending_lsns: Vec::new(),
171 805176 : pending_metadata_pages: HashMap::new(),
172 805176 : pending_data_pages: Vec::new(),
173 805176 : pending_zero_data_pages: Default::default(),
174 805176 : pending_deletions: Vec::new(),
175 805176 : pending_nblocks: 0,
176 805176 : pending_directory_entries: Vec::new(),
177 805176 : pending_bytes: 0,
178 805176 : lsn,
179 805176 : }
180 805176 : }
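// A minimal usage sketch of the modification lifecycle described above
// (editor's illustration, not part of the original source; `rel`, `blknum`,
// `img`, and `ctx` are assumed to be in scope, and commit()'s exact
// signature is assumed):
//
//     let mut modification = timeline.begin_modification(lsn);
//     modification.put_rel_page_image(rel, blknum, img)?;
//     modification.on_record_end(); // flush per-record state (e.g. queued zero pages)
//     modification.commit(ctx).await?; // make the changes visible to `get`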
181 :
182 : //------------------------------------------------------------------------------
183 : // Public GET functions
184 : //------------------------------------------------------------------------------
185 :
186 : /// Look up given page version.
187 55152 : pub(crate) async fn get_rel_page_at_lsn(
188 55152 : &self,
189 55152 : tag: RelTag,
190 55152 : blknum: BlockNumber,
191 55152 : version: Version<'_>,
192 55152 : ctx: &RequestContext,
193 55152 : ) -> Result<Bytes, PageReconstructError> {
194 55152 : if tag.relnode == 0 {
195 0 : return Err(PageReconstructError::Other(
196 0 : RelationError::InvalidRelnode.into(),
197 0 : ));
198 55152 : }
199 :
200 55152 : let nblocks = self.get_rel_size(tag, version, ctx).await?;
201 55152 : if blknum >= nblocks {
202 0 : debug!(
203 0 : "read beyond EOF at {} blk {} at {}, size is {}: returning all-zeros page",
204 0 : tag,
205 0 : blknum,
206 0 : version.get_lsn(),
207 : nblocks
208 : );
209 0 : return Ok(ZERO_PAGE.clone());
210 55152 : }
211 55152 :
212 55152 : let key = rel_block_to_key(tag, blknum);
213 55152 : version.get(self, key, ctx).await
214 55152 : }
215 :
216 : // Get size of a database in blocks
217 0 : pub(crate) async fn get_db_size(
218 0 : &self,
219 0 : spcnode: Oid,
220 0 : dbnode: Oid,
221 0 : version: Version<'_>,
222 0 : ctx: &RequestContext,
223 0 : ) -> Result<usize, PageReconstructError> {
224 0 : let mut total_blocks = 0;
225 :
226 0 : let rels = self.list_rels(spcnode, dbnode, version, ctx).await?;
227 :
228 0 : for rel in rels {
229 0 : let n_blocks = self.get_rel_size(rel, version, ctx).await?;
230 0 : total_blocks += n_blocks as usize;
231 : }
232 0 : Ok(total_blocks)
233 0 : }
234 :
235 : /// Get size of a relation file
236 73302 : pub(crate) async fn get_rel_size(
237 73302 : &self,
238 73302 : tag: RelTag,
239 73302 : version: Version<'_>,
240 73302 : ctx: &RequestContext,
241 73302 : ) -> Result<BlockNumber, PageReconstructError> {
242 73302 : if tag.relnode == 0 {
243 0 : return Err(PageReconstructError::Other(
244 0 : RelationError::InvalidRelnode.into(),
245 0 : ));
246 73302 : }
247 :
248 73302 : if let Some(nblocks) = self.get_cached_rel_size(&tag, version.get_lsn()) {
249 57882 : return Ok(nblocks);
250 15420 : }
251 15420 :
252 15420 : if (tag.forknum == FSM_FORKNUM || tag.forknum == VISIBILITYMAP_FORKNUM)
253 0 : && !self.get_rel_exists(tag, version, ctx).await?
254 : {
255 : // FIXME: Postgres sometimes calls smgrcreate() to create
256 : // FSM, and smgrnblocks() on it immediately afterwards,
257 : // without extending it. Tolerate that by claiming that
258 : // any non-existent FSM fork has size 0.
259 0 : return Ok(0);
260 15420 : }
261 15420 :
262 15420 : let key = rel_size_to_key(tag);
263 15420 : let mut buf = version.get(self, key, ctx).await?;
264 15408 : let nblocks = buf.get_u32_le();
265 15408 :
266 15408 : self.update_cached_rel_size(tag, version.get_lsn(), nblocks);
267 15408 :
268 15408 : Ok(nblocks)
269 73302 : }
270 :
271 : /// Does relation exist?
272 18150 : pub(crate) async fn get_rel_exists(
273 18150 : &self,
274 18150 : tag: RelTag,
275 18150 : version: Version<'_>,
276 18150 : ctx: &RequestContext,
277 18150 : ) -> Result<bool, PageReconstructError> {
278 18150 : if tag.relnode == 0 {
279 0 : return Err(PageReconstructError::Other(
280 0 : RelationError::InvalidRelnode.into(),
281 0 : ));
282 18150 : }
283 :
284 : // first try to lookup relation in cache
285 18150 : if let Some(_nblocks) = self.get_cached_rel_size(&tag, version.get_lsn()) {
286 18096 : return Ok(true);
287 54 : }
288 : // then check if the database was already initialized.
289 : // get_rel_exists can be called before dbdir is created.
290 54 : let buf = version.get(self, DBDIR_KEY, ctx).await?;
291 54 : let dbdirs = DbDirectory::des(&buf)?.dbdirs;
292 54 : if !dbdirs.contains_key(&(tag.spcnode, tag.dbnode)) {
293 0 : return Ok(false);
294 54 : }
295 54 : // fetch directory listing
296 54 : let key = rel_dir_to_key(tag.spcnode, tag.dbnode);
297 54 : let buf = version.get(self, key, ctx).await?;
298 :
299 54 : let dir = RelDirectory::des(&buf)?;
300 54 : Ok(dir.rels.contains(&(tag.relnode, tag.forknum)))
301 18150 : }
302 :
303 : /// Get a list of all existing relations in the given tablespace and database.
304 : ///
305 : /// # Cancel-Safety
306 : ///
307 : /// This method is cancellation-safe.
308 0 : pub(crate) async fn list_rels(
309 0 : &self,
310 0 : spcnode: Oid,
311 0 : dbnode: Oid,
312 0 : version: Version<'_>,
313 0 : ctx: &RequestContext,
314 0 : ) -> Result<HashSet<RelTag>, PageReconstructError> {
315 0 : // fetch directory listing
316 0 : let key = rel_dir_to_key(spcnode, dbnode);
317 0 : let buf = version.get(self, key, ctx).await?;
318 :
319 0 : let dir = RelDirectory::des(&buf)?;
320 0 : let rels: HashSet<RelTag> =
321 0 : HashSet::from_iter(dir.rels.iter().map(|(relnode, forknum)| RelTag {
322 0 : spcnode,
323 0 : dbnode,
324 0 : relnode: *relnode,
325 0 : forknum: *forknum,
326 0 : }));
327 0 :
328 0 : Ok(rels)
329 0 : }
330 :
331 : /// Get the whole SLRU segment
332 0 : pub(crate) async fn get_slru_segment(
333 0 : &self,
334 0 : kind: SlruKind,
335 0 : segno: u32,
336 0 : lsn: Lsn,
337 0 : ctx: &RequestContext,
338 0 : ) -> Result<Bytes, PageReconstructError> {
339 0 : let n_blocks = self
340 0 : .get_slru_segment_size(kind, segno, Version::Lsn(lsn), ctx)
341 0 : .await?;
342 0 : let mut segment = BytesMut::with_capacity(n_blocks as usize * BLCKSZ as usize);
343 0 : for blkno in 0..n_blocks {
344 0 : let block = self
345 0 : .get_slru_page_at_lsn(kind, segno, blkno, lsn, ctx)
346 0 : .await?;
347 0 : segment.extend_from_slice(&block[..BLCKSZ as usize]);
348 : }
349 0 : Ok(segment.freeze())
350 0 : }
351 :
352 : /// Look up given SLRU page version.
353 0 : pub(crate) async fn get_slru_page_at_lsn(
354 0 : &self,
355 0 : kind: SlruKind,
356 0 : segno: u32,
357 0 : blknum: BlockNumber,
358 0 : lsn: Lsn,
359 0 : ctx: &RequestContext,
360 0 : ) -> Result<Bytes, PageReconstructError> {
361 0 : let key = slru_block_to_key(kind, segno, blknum);
362 0 : self.get(key, lsn, ctx).await
363 0 : }
364 :
365 : /// Get size of an SLRU segment
366 0 : pub(crate) async fn get_slru_segment_size(
367 0 : &self,
368 0 : kind: SlruKind,
369 0 : segno: u32,
370 0 : version: Version<'_>,
371 0 : ctx: &RequestContext,
372 0 : ) -> Result<BlockNumber, PageReconstructError> {
373 0 : let key = slru_segment_size_to_key(kind, segno);
374 0 : let mut buf = version.get(self, key, ctx).await?;
375 0 : Ok(buf.get_u32_le())
376 0 : }
377 :
378 : /// Does the SLRU segment exist?
379 0 : pub(crate) async fn get_slru_segment_exists(
380 0 : &self,
381 0 : kind: SlruKind,
382 0 : segno: u32,
383 0 : version: Version<'_>,
384 0 : ctx: &RequestContext,
385 0 : ) -> Result<bool, PageReconstructError> {
386 0 : // fetch directory listing
387 0 : let key = slru_dir_to_key(kind);
388 0 : let buf = version.get(self, key, ctx).await?;
389 :
390 0 : let dir = SlruSegmentDirectory::des(&buf)?;
391 0 : Ok(dir.segments.contains(&segno))
392 0 : }
393 :
394 : /// Locate an LSN such that all transactions that committed before
395 : /// 'search_timestamp' are visible, but nothing newer is.
396 : ///
397 : /// This is not exact. Commit timestamps are not guaranteed to be ordered,
398 : /// so it's not well defined which LSN you get if there were multiple commits
399 : /// "in flight" at that point in time.
400 : ///
401 0 : pub(crate) async fn find_lsn_for_timestamp(
402 0 : &self,
403 0 : search_timestamp: TimestampTz,
404 0 : cancel: &CancellationToken,
405 0 : ctx: &RequestContext,
406 0 : ) -> Result<LsnForTimestamp, PageReconstructError> {
407 0 : pausable_failpoint!("find-lsn-for-timestamp-pausable");
408 :
409 0 : let gc_cutoff_lsn_guard = self.get_latest_gc_cutoff_lsn();
410 0 : // We use this method to figure out the branching LSN for the new branch, but the
411 0 : // GC cutoff could be before the branching point and we cannot create a new branch
412 0 : // with LSN < `ancestor_lsn`. Thus, pick the maximum of these two to be
413 0 : // on the safe side.
414 0 : let min_lsn = std::cmp::max(*gc_cutoff_lsn_guard, self.get_ancestor_lsn());
415 0 : let max_lsn = self.get_last_record_lsn();
416 0 :
417 0 : // LSNs are always 8-byte aligned. low/mid/high represent the
418 0 : // LSN divided by 8.
419 0 : let mut low = min_lsn.0 / 8;
420 0 : let mut high = max_lsn.0 / 8 + 1;
421 0 :
422 0 : let mut found_smaller = false;
423 0 : let mut found_larger = false;
424 :
425 0 : while low < high {
426 0 : if cancel.is_cancelled() {
427 0 : return Err(PageReconstructError::Cancelled);
428 0 : }
429 0 : // cannot overflow, high and low are both smaller than u64::MAX / 2
430 0 : let mid = (high + low) / 2;
431 :
432 0 : let cmp = self
433 0 : .is_latest_commit_timestamp_ge_than(
434 0 : search_timestamp,
435 0 : Lsn(mid * 8),
436 0 : &mut found_smaller,
437 0 : &mut found_larger,
438 0 : ctx,
439 0 : )
440 0 : .await?;
441 :
442 0 : if cmp {
443 0 : high = mid;
444 0 : } else {
445 0 : low = mid + 1;
446 0 : }
447 : }
448 : // If `found_smaller == true`, `low = t + 1` where `t` is the target LSN,
449 : // i.e. the LSN of the last commit record at or before `search_timestamp`.
450 : // Subtract one from `low` to get `t`.
451 : //
452 : // FIXME: it would be better to get the LSN of the previous commit.
453 : // Otherwise, if you restore to the returned LSN, the database will
454 : // include physical changes from later commits that will be marked
455 : // as aborted, and will need to be vacuumed away.
456 0 : let commit_lsn = Lsn((low - 1) * 8);
457 0 : match (found_smaller, found_larger) {
458 : (false, false) => {
459 : // This can happen if no commit records have been processed yet, e.g.
460 : // just after importing a cluster.
461 0 : Ok(LsnForTimestamp::NoData(min_lsn))
462 : }
463 : (false, true) => {
464 : // Didn't find any commit timestamps smaller than the request
465 0 : Ok(LsnForTimestamp::Past(min_lsn))
466 : }
467 0 : (true, _) if commit_lsn < min_lsn => {
468 0 : // the search above did set found_smaller to true but it never increased the lsn.
469 0 : // Then, low is still the old min_lsn, and the subtraction above gave a value
470 0 : // below the min_lsn. We should never do that.
471 0 : Ok(LsnForTimestamp::Past(min_lsn))
472 : }
473 : (true, false) => {
474 : // Only found commits with timestamps smaller than the request.
475 : // It's still a valid case for branch creation, return it.
476 : // And `update_gc_info()` ignores LSN for a `LsnForTimestamp::Future`
477 : // case, anyway.
478 0 : Ok(LsnForTimestamp::Future(commit_lsn))
479 : }
480 0 : (true, true) => Ok(LsnForTimestamp::Present(commit_lsn)),
481 : }
482 0 : }
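// Shape of the loop above, as a condensed sketch (editor's illustration;
// cancellation and the found_smaller/found_larger bookkeeping are omitted).
// The probe predicate "newest commit timestamp at this LSN >= search_timestamp"
// is monotone in the LSN, so this is a standard lower-bound binary search
// over LSN/8 values:
//
//     while low < high {
//         let mid = (low + high) / 2;
//         if pred(Lsn(mid * 8))? { high = mid; } else { low = mid + 1; }
//     }
//     // `low` is now the first probe where the predicate held, and
//     // Lsn((low - 1) * 8) is the commit LSN computed above.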
483 :
484 : /// Subroutine of find_lsn_for_timestamp(). Returns true if any commits
485 : /// committed after 'search_timestamp', as seen at LSN 'probe_lsn'.
486 : ///
487 : /// Additionally, sets 'found_smaller'/'found_larger' if it encounters any
488 : /// commits with a smaller/larger timestamp.
489 : ///
490 0 : pub(crate) async fn is_latest_commit_timestamp_ge_than(
491 0 : &self,
492 0 : search_timestamp: TimestampTz,
493 0 : probe_lsn: Lsn,
494 0 : found_smaller: &mut bool,
495 0 : found_larger: &mut bool,
496 0 : ctx: &RequestContext,
497 0 : ) -> Result<bool, PageReconstructError> {
498 0 : self.map_all_timestamps(probe_lsn, ctx, |timestamp| {
499 0 : if timestamp >= search_timestamp {
500 0 : *found_larger = true;
501 0 : return ControlFlow::Break(true);
502 0 : } else {
503 0 : *found_smaller = true;
504 0 : }
505 0 : ControlFlow::Continue(())
506 0 : })
507 0 : .await
508 0 : }
509 :
510 : /// Obtain the latest commit timestamp at the given LSN.
511 : ///
512 : /// Returns None if the LSN has no timestamps; otherwise returns the greatest timestamp found.
513 0 : pub(crate) async fn get_timestamp_for_lsn(
514 0 : &self,
515 0 : probe_lsn: Lsn,
516 0 : ctx: &RequestContext,
517 0 : ) -> Result<Option<TimestampTz>, PageReconstructError> {
518 0 : let mut max: Option<TimestampTz> = None;
519 0 : self.map_all_timestamps::<()>(probe_lsn, ctx, |timestamp| {
520 0 : if let Some(max_prev) = max {
521 0 : max = Some(max_prev.max(timestamp));
522 0 : } else {
523 0 : max = Some(timestamp);
524 0 : }
525 0 : ControlFlow::Continue(())
526 0 : })
527 0 : .await?;
528 :
529 0 : Ok(max)
530 0 : }
531 :
532 : /// Runs the given function on all the timestamps for a given lsn
533 : ///
534 : /// The return value is either given by the closure, or set to the `Default`
535 : /// impl's output.
536 0 : async fn map_all_timestamps<T: Default>(
537 0 : &self,
538 0 : probe_lsn: Lsn,
539 0 : ctx: &RequestContext,
540 0 : mut f: impl FnMut(TimestampTz) -> ControlFlow<T>,
541 0 : ) -> Result<T, PageReconstructError> {
542 0 : for segno in self
543 0 : .list_slru_segments(SlruKind::Clog, Version::Lsn(probe_lsn), ctx)
544 0 : .await?
545 : {
546 0 : let nblocks = self
547 0 : .get_slru_segment_size(SlruKind::Clog, segno, Version::Lsn(probe_lsn), ctx)
548 0 : .await?;
549 0 : for blknum in (0..nblocks).rev() {
550 0 : let clog_page = self
551 0 : .get_slru_page_at_lsn(SlruKind::Clog, segno, blknum, probe_lsn, ctx)
552 0 : .await?;
553 :
554 0 : if clog_page.len() == BLCKSZ as usize + 8 {
555 0 : let mut timestamp_bytes = [0u8; 8];
556 0 : timestamp_bytes.copy_from_slice(&clog_page[BLCKSZ as usize..]);
557 0 : let timestamp = TimestampTz::from_be_bytes(timestamp_bytes);
558 0 :
559 0 : match f(timestamp) {
560 0 : ControlFlow::Break(b) => return Ok(b),
561 0 : ControlFlow::Continue(()) => (),
562 : }
563 0 : }
564 : }
565 : }
566 0 : Ok(Default::default())
567 0 : }
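// Page layout assumed by the length check above (editor's note, inferred
// from this function): a CLOG page that carries a commit timestamp is the
// BLCKSZ bytes of SLRU page data followed by an 8-byte big-endian trailer:
//
//     [0 .. BLCKSZ)            SLRU page contents
//     [BLCKSZ .. BLCKSZ + 8)   TimestampTz::from_be_bytes(trailer)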
568 :
569 0 : pub(crate) async fn get_slru_keyspace(
570 0 : &self,
571 0 : version: Version<'_>,
572 0 : ctx: &RequestContext,
573 0 : ) -> Result<KeySpace, PageReconstructError> {
574 0 : let mut accum = KeySpaceAccum::new();
575 :
576 0 : for kind in SlruKind::iter() {
577 0 : let mut segments: Vec<u32> = self
578 0 : .list_slru_segments(kind, version, ctx)
579 0 : .await?
580 0 : .into_iter()
581 0 : .collect();
582 0 : segments.sort_unstable();
583 :
584 0 : for seg in segments {
585 0 : let block_count = self.get_slru_segment_size(kind, seg, version, ctx).await?;
586 :
587 0 : accum.add_range(
588 0 : slru_block_to_key(kind, seg, 0)..slru_block_to_key(kind, seg, block_count),
589 0 : );
590 : }
591 : }
592 :
593 0 : Ok(accum.to_keyspace())
594 0 : }
595 :
596 : /// Get a list of SLRU segments
597 0 : pub(crate) async fn list_slru_segments(
598 0 : &self,
599 0 : kind: SlruKind,
600 0 : version: Version<'_>,
601 0 : ctx: &RequestContext,
602 0 : ) -> Result<HashSet<u32>, PageReconstructError> {
603 0 : // fetch directory entry
604 0 : let key = slru_dir_to_key(kind);
605 :
606 0 : let buf = version.get(self, key, ctx).await?;
607 0 : Ok(SlruSegmentDirectory::des(&buf)?.segments)
608 0 : }
609 :
610 0 : pub(crate) async fn get_relmap_file(
611 0 : &self,
612 0 : spcnode: Oid,
613 0 : dbnode: Oid,
614 0 : version: Version<'_>,
615 0 : ctx: &RequestContext,
616 0 : ) -> Result<Bytes, PageReconstructError> {
617 0 : let key = relmap_file_key(spcnode, dbnode);
618 :
619 0 : let buf = version.get(self, key, ctx).await?;
620 0 : Ok(buf)
621 0 : }
622 :
623 864 : pub(crate) async fn list_dbdirs(
624 864 : &self,
625 864 : lsn: Lsn,
626 864 : ctx: &RequestContext,
627 864 : ) -> Result<HashMap<(Oid, Oid), bool>, PageReconstructError> {
628 : // fetch directory entry
629 9320 : let buf = self.get(DBDIR_KEY, lsn, ctx).await?;
630 :
631 864 : Ok(DbDirectory::des(&buf)?.dbdirs)
632 864 : }
633 :
634 0 : pub(crate) async fn get_twophase_file(
635 0 : &self,
636 0 : xid: TransactionId,
637 0 : lsn: Lsn,
638 0 : ctx: &RequestContext,
639 0 : ) -> Result<Bytes, PageReconstructError> {
640 0 : let key = twophase_file_key(xid);
641 0 : let buf = self.get(key, lsn, ctx).await?;
642 0 : Ok(buf)
643 0 : }
644 :
645 6 : pub(crate) async fn list_twophase_files(
646 6 : &self,
647 6 : lsn: Lsn,
648 6 : ctx: &RequestContext,
649 6 : ) -> Result<HashSet<TransactionId>, PageReconstructError> {
650 : // fetch directory entry
651 6 : let buf = self.get(TWOPHASEDIR_KEY, lsn, ctx).await?;
652 :
653 6 : Ok(TwoPhaseDirectory::des(&buf)?.xids)
654 6 : }
655 :
656 0 : pub(crate) async fn get_control_file(
657 0 : &self,
658 0 : lsn: Lsn,
659 0 : ctx: &RequestContext,
660 0 : ) -> Result<Bytes, PageReconstructError> {
661 0 : self.get(CONTROLFILE_KEY, lsn, ctx).await
662 0 : }
663 :
664 36 : pub(crate) async fn get_checkpoint(
665 36 : &self,
666 36 : lsn: Lsn,
667 36 : ctx: &RequestContext,
668 36 : ) -> Result<Bytes, PageReconstructError> {
669 36 : self.get(CHECKPOINT_KEY, lsn, ctx).await
670 36 : }
671 :
672 48 : async fn list_aux_files_v1(
673 48 : &self,
674 48 : lsn: Lsn,
675 48 : ctx: &RequestContext,
676 48 : ) -> Result<HashMap<String, Bytes>, PageReconstructError> {
677 48 : match self.get(AUX_FILES_KEY, lsn, ctx).await {
678 30 : Ok(buf) => Ok(AuxFilesDirectory::des(&buf)?.files),
679 18 : Err(e) => {
680 18 : // This is expected: historical databases do not have the key.
681 18 : debug!("Failed to get info about AUX files: {}", e);
682 18 : Ok(HashMap::new())
683 : }
684 : }
685 48 : }
686 :
687 72 : async fn list_aux_files_v2(
688 72 : &self,
689 72 : lsn: Lsn,
690 72 : ctx: &RequestContext,
691 72 : ) -> Result<HashMap<String, Bytes>, PageReconstructError> {
692 72 : let kv = self
693 72 : .scan(KeySpace::single(Key::metadata_aux_key_range()), lsn, ctx)
694 0 : .await?;
695 72 : let mut result = HashMap::new();
696 72 : let mut sz = 0;
697 180 : for (_, v) in kv {
698 108 : let v = v?;
699 108 : let v = aux_file::decode_file_value_bytes(&v)
700 108 : .context("value decode")
701 108 : .map_err(PageReconstructError::Other)?;
702 210 : for (fname, content) in v {
703 102 : sz += fname.len();
704 102 : sz += content.len();
705 102 : result.insert(fname, content);
706 102 : }
707 : }
708 72 : self.aux_file_size_estimator.on_initial(sz);
709 72 : Ok(result)
710 72 : }
711 :
712 0 : pub(crate) async fn trigger_aux_file_size_computation(
713 0 : &self,
714 0 : lsn: Lsn,
715 0 : ctx: &RequestContext,
716 0 : ) -> Result<(), PageReconstructError> {
717 0 : let current_policy = self.last_aux_file_policy.load();
718 0 : if let Some(AuxFilePolicy::V2) | Some(AuxFilePolicy::CrossValidation) = current_policy {
719 0 : self.list_aux_files_v2(lsn, ctx).await?;
720 0 : }
721 0 : Ok(())
722 0 : }
723 :
724 78 : pub(crate) async fn list_aux_files(
725 78 : &self,
726 78 : lsn: Lsn,
727 78 : ctx: &RequestContext,
728 78 : ) -> Result<HashMap<String, Bytes>, PageReconstructError> {
729 78 : let current_policy = self.last_aux_file_policy.load();
730 78 : match current_policy {
731 : Some(AuxFilePolicy::V1) => {
732 6 : let res = self.list_aux_files_v1(lsn, ctx).await?;
733 6 : let empty_str = if res.is_empty() { ", empty" } else { "" };
734 6 : warn!(
735 0 : "this timeline is using deprecated aux file policy V1 (policy=v1{empty_str})"
736 : );
737 6 : Ok(res)
738 : }
739 : None => {
740 0 : let res = self.list_aux_files_v1(lsn, ctx).await?;
741 0 : if !res.is_empty() {
742 0 : warn!("this timeline is using deprecated aux file policy V1 (policy=None)");
743 0 : }
744 0 : Ok(res)
745 : }
746 66 : Some(AuxFilePolicy::V2) => self.list_aux_files_v2(lsn, ctx).await,
747 : Some(AuxFilePolicy::CrossValidation) => {
748 6 : let v1_result = self.list_aux_files_v1(lsn, ctx).await;
749 6 : let v2_result = self.list_aux_files_v2(lsn, ctx).await;
750 6 : match (v1_result, v2_result) {
751 6 : (Ok(v1), Ok(v2)) => {
752 6 : if v1 != v2 {
753 0 : tracing::error!(
754 0 : "unmatched aux file v1 v2 result:\nv1 {v1:?}\nv2 {v2:?}"
755 : );
756 0 : return Err(PageReconstructError::Other(anyhow::anyhow!(
757 0 : "unmatched aux file v1 v2 result"
758 0 : )));
759 6 : }
760 6 : Ok(v1)
761 : }
762 0 : (Ok(_), Err(v2)) => {
763 0 : tracing::error!("aux file v1 returns Ok while aux file v2 returns an err");
764 0 : Err(v2)
765 : }
766 0 : (Err(v1), Ok(_)) => {
767 0 : tracing::error!("aux file v2 returns Ok while aux file v1 returns an err");
768 0 : Err(v1)
769 : }
770 0 : (Err(_), Err(v2)) => Err(v2),
771 : }
772 : }
773 : }
774 78 : }
775 :
776 0 : pub(crate) async fn get_replorigins(
777 0 : &self,
778 0 : lsn: Lsn,
779 0 : ctx: &RequestContext,
780 0 : ) -> Result<HashMap<RepOriginId, Lsn>, PageReconstructError> {
781 0 : let kv = self
782 0 : .scan(KeySpace::single(repl_origin_key_range()), lsn, ctx)
783 0 : .await?;
784 0 : let mut result = HashMap::new();
785 0 : for (k, v) in kv {
786 0 : let v = v?;
787 0 : let origin_id = k.field6 as RepOriginId;
788 0 : let origin_lsn = Lsn::des(&v).unwrap();
789 0 : if origin_lsn != Lsn::INVALID {
790 0 : result.insert(origin_id, origin_lsn);
791 0 : }
792 : }
793 0 : Ok(result)
794 0 : }
795 :
796 : /// Does the same as get_current_logical_size, but computed on demand.
797 : /// Used to initialize the logical size tracking on startup.
798 : ///
799 : /// Only relation blocks are counted currently. That excludes metadata,
800 : /// SLRUs, twophase files etc.
801 : ///
802 : /// # Cancel-Safety
803 : ///
804 : /// This method is cancellation-safe.
805 0 : pub(crate) async fn get_current_logical_size_non_incremental(
806 0 : &self,
807 0 : lsn: Lsn,
808 0 : ctx: &RequestContext,
809 0 : ) -> Result<u64, CalculateLogicalSizeError> {
810 0 : debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id();
811 :
812 : // Fetch list of database dirs and iterate them
813 0 : let buf = self.get(DBDIR_KEY, lsn, ctx).await?;
814 0 : let dbdir = DbDirectory::des(&buf)?;
815 :
816 0 : let mut total_size: u64 = 0;
817 0 : for (spcnode, dbnode) in dbdir.dbdirs.keys() {
818 0 : for rel in self
819 0 : .list_rels(*spcnode, *dbnode, Version::Lsn(lsn), ctx)
820 0 : .await?
821 : {
822 0 : if self.cancel.is_cancelled() {
823 0 : return Err(CalculateLogicalSizeError::Cancelled);
824 0 : }
825 0 : let relsize_key = rel_size_to_key(rel);
826 0 : let mut buf = self.get(relsize_key, lsn, ctx).await?;
827 0 : let relsize = buf.get_u32_le();
828 0 :
829 0 : total_size += relsize as u64;
830 : }
831 : }
832 0 : Ok(total_size * BLCKSZ as u64)
833 0 : }
834 :
835 : ///
836 : /// Get a KeySpace that covers all the Keys that are in use at the given LSN.
837 : /// Anything that's not listed may be removed from the underlying storage (from
838 : /// that LSN forwards).
839 : ///
840 : /// The return value is (dense keyspace, sparse keyspace).
841 864 : pub(crate) async fn collect_keyspace(
842 864 : &self,
843 864 : lsn: Lsn,
844 864 : ctx: &RequestContext,
845 864 : ) -> Result<(KeySpace, SparseKeySpace), CollectKeySpaceError> {
846 864 : // Iterate through key ranges, greedily packing them into partitions
847 864 : let mut result = KeySpaceAccum::new();
848 864 :
849 864 : // The dbdir metadata always exists
850 864 : result.add_key(DBDIR_KEY);
851 :
852 : // Fetch list of database dirs and iterate them
853 9320 : let dbdir = self.list_dbdirs(lsn, ctx).await?;
854 864 : let mut dbs: Vec<((Oid, Oid), bool)> = dbdir.into_iter().collect();
855 864 :
856 864 : dbs.sort_unstable_by(|(k_a, _), (k_b, _)| k_a.cmp(k_b));
857 864 : for ((spcnode, dbnode), has_relmap_file) in dbs {
858 0 : if has_relmap_file {
859 0 : result.add_key(relmap_file_key(spcnode, dbnode));
860 0 : }
861 0 : result.add_key(rel_dir_to_key(spcnode, dbnode));
862 :
863 0 : let mut rels: Vec<RelTag> = self
864 0 : .list_rels(spcnode, dbnode, Version::Lsn(lsn), ctx)
865 0 : .await?
866 0 : .into_iter()
867 0 : .collect();
868 0 : rels.sort_unstable();
869 0 : for rel in rels {
870 0 : let relsize_key = rel_size_to_key(rel);
871 0 : let mut buf = self.get(relsize_key, lsn, ctx).await?;
872 0 : let relsize = buf.get_u32_le();
873 0 :
874 0 : result.add_range(rel_block_to_key(rel, 0)..rel_block_to_key(rel, relsize));
875 0 : result.add_key(relsize_key);
876 : }
877 : }
878 :
879 : // Iterate SLRUs next
880 2592 : for kind in [
881 864 : SlruKind::Clog,
882 864 : SlruKind::MultiXactMembers,
883 864 : SlruKind::MultiXactOffsets,
884 : ] {
885 2592 : let slrudir_key = slru_dir_to_key(kind);
886 2592 : result.add_key(slrudir_key);
887 28547 : let buf = self.get(slrudir_key, lsn, ctx).await?;
888 2592 : let dir = SlruSegmentDirectory::des(&buf)?;
889 2592 : let mut segments: Vec<u32> = dir.segments.iter().cloned().collect();
890 2592 : segments.sort_unstable();
891 2592 : for segno in segments {
892 0 : let segsize_key = slru_segment_size_to_key(kind, segno);
893 0 : let mut buf = self.get(segsize_key, lsn, ctx).await?;
894 0 : let segsize = buf.get_u32_le();
895 0 :
896 0 : result.add_range(
897 0 : slru_block_to_key(kind, segno, 0)..slru_block_to_key(kind, segno, segsize),
898 0 : );
899 0 : result.add_key(segsize_key);
900 : }
901 : }
902 :
903 : // Then pg_twophase
904 864 : result.add_key(TWOPHASEDIR_KEY);
905 9348 : let buf = self.get(TWOPHASEDIR_KEY, lsn, ctx).await?;
906 864 : let twophase_dir = TwoPhaseDirectory::des(&buf)?;
907 864 : let mut xids: Vec<TransactionId> = twophase_dir.xids.iter().cloned().collect();
908 864 : xids.sort_unstable();
909 864 : for xid in xids {
910 0 : result.add_key(twophase_file_key(xid));
911 0 : }
912 :
913 864 : result.add_key(CONTROLFILE_KEY);
914 864 : result.add_key(CHECKPOINT_KEY);
915 864 : if self.get(AUX_FILES_KEY, lsn, ctx).await.is_ok() {
916 12 : result.add_key(AUX_FILES_KEY);
917 852 : }
918 :
919 : // Add extra keyspaces in the test cases. Some test cases write keys into the storage without
920 : // creating directory keys. These test cases will add such keyspaces into `extra_test_dense_keyspace`
921 : // and the keys will not be garbage-collected.
922 : #[cfg(test)]
923 : {
924 864 : let guard = self.extra_test_dense_keyspace.load();
925 864 : for kr in &guard.ranges {
926 0 : result.add_range(kr.clone());
927 0 : }
928 : }
929 :
930 864 : let dense_keyspace = result.to_keyspace();
931 864 : let sparse_keyspace = SparseKeySpace(KeySpace {
932 864 : ranges: vec![Key::metadata_aux_key_range(), repl_origin_key_range()],
933 864 : });
934 864 :
935 864 : if cfg!(debug_assertions) {
936 : // Verify if the sparse keyspaces are ordered and non-overlapping.
937 :
938 : // We do not use KeySpaceAccum for sparse_keyspace because we want to ensure each
939 : // category of sparse keys are split into their own image/delta files. If there
940 : // are overlapping keyspaces, they will be automatically merged by keyspace accum,
941 : // and we want the developer to keep the keyspaces separated.
942 :
943 864 : let ranges = &sparse_keyspace.0.ranges;
944 :
945 : // TODO: use a single overlaps_with across the codebase
946 864 : fn overlaps_with<T: Ord>(a: &Range<T>, b: &Range<T>) -> bool {
947 864 : !(a.end <= b.start || b.end <= a.start)
948 864 : }
949 1728 : for i in 0..ranges.len() {
950 1728 : for j in 0..i {
951 864 : if overlaps_with(&ranges[i], &ranges[j]) {
952 0 : panic!(
953 0 : "overlapping sparse keyspace: {}..{} and {}..{}",
954 0 : ranges[i].start, ranges[i].end, ranges[j].start, ranges[j].end
955 0 : );
956 864 : }
957 : }
958 : }
959 864 : for i in 1..ranges.len() {
960 864 : assert!(
961 864 : ranges[i - 1].end <= ranges[i].start,
962 0 : "unordered sparse keyspace: {}..{} and {}..{}",
963 0 : ranges[i - 1].start,
964 0 : ranges[i - 1].end,
965 0 : ranges[i].start,
966 0 : ranges[i].end
967 : );
968 : }
969 0 : }
970 :
971 864 : Ok((dense_keyspace, sparse_keyspace))
972 864 : }
973 :
974 : /// Get the cached size of a relation, if it was not updated after the specified LSN
975 1345620 : pub fn get_cached_rel_size(&self, tag: &RelTag, lsn: Lsn) -> Option<BlockNumber> {
976 1345620 : let rel_size_cache = self.rel_size_cache.read().unwrap();
977 1345620 : if let Some((cached_lsn, nblocks)) = rel_size_cache.map.get(tag) {
978 1345554 : if lsn >= *cached_lsn {
979 1330116 : return Some(*nblocks);
980 15438 : }
981 66 : }
982 15504 : None
983 1345620 : }
984 :
985 : /// Update cached relation size if there is no more recent update
986 15408 : pub fn update_cached_rel_size(&self, tag: RelTag, lsn: Lsn, nblocks: BlockNumber) {
987 15408 : let mut rel_size_cache = self.rel_size_cache.write().unwrap();
988 15408 :
989 15408 : if lsn < rel_size_cache.complete_as_of {
990 : // Do not cache old values. It's safe to cache the size on read, as long as
991 : // the read was at an LSN since we started the WAL ingestion. Reasoning: we
992 : // never evict values from the cache, so if the relation size changed after
993 : // 'lsn', the new value is already in the cache.
994 0 : return;
995 15408 : }
996 15408 :
997 15408 : match rel_size_cache.map.entry(tag) {
998 15408 : hash_map::Entry::Occupied(mut entry) => {
999 15408 : let cached_lsn = entry.get_mut();
1000 15408 : if lsn >= cached_lsn.0 {
1001 0 : *cached_lsn = (lsn, nblocks);
1002 15408 : }
1003 : }
1004 0 : hash_map::Entry::Vacant(entry) => {
1005 0 : entry.insert((lsn, nblocks));
1006 0 : }
1007 : }
1008 15408 : }
1009 :
1010 : /// Store cached relation size
1011 866196 : pub fn set_cached_rel_size(&self, tag: RelTag, lsn: Lsn, nblocks: BlockNumber) {
1012 866196 : let mut rel_size_cache = self.rel_size_cache.write().unwrap();
1013 866196 : rel_size_cache.map.insert(tag, (lsn, nblocks));
1014 866196 : }
1015 :
1016 : /// Remove cached relation size
1017 6 : pub fn remove_cached_rel_size(&self, tag: &RelTag) {
1018 6 : let mut rel_size_cache = self.rel_size_cache.write().unwrap();
1019 6 : rel_size_cache.map.remove(tag);
1020 6 : }
1021 : }
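// How the relation size cache above is typically used (editor's sketch,
// condensed from get_rel_size earlier in this file): consult the cache
// first, fall back to a versioned read of the size key, then populate the
// cache for later lookups at this or newer LSNs.
//
//     if let Some(nblocks) = timeline.get_cached_rel_size(&tag, lsn) {
//         return Ok(nblocks);
//     }
//     let mut buf = version.get(timeline, rel_size_to_key(tag), ctx).await?;
//     let nblocks = buf.get_u32_le();
//     timeline.update_cached_rel_size(tag, lsn, nblocks);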
1022 :
1023 : /// DatadirModification represents an operation to ingest an atomic set of
1024 : /// updates to the repository.
1025 : ///
1026 : /// It is created by the 'begin_modification' function. It is used for each WAL
1027 : /// record, so that all the modifications by one WAL record appear atomic.
1028 : pub struct DatadirModification<'a> {
1029 : /// The timeline this modification applies to. You can access this to
1030 : /// read the state, but note that any pending updates are *not* reflected
1031 : /// in the state in 'tline' yet.
1032 : pub tline: &'a Timeline,
1033 :
1034 : /// Current LSN of the modification
1035 : lsn: Lsn,
1036 :
1037 : // The modifications are not applied directly to the underlying key-value store.
1038 : // The put-functions add the modifications here, and they are flushed to the
1039 : // underlying key-value store by the 'commit' function.
1040 : pending_lsns: Vec<Lsn>,
1041 : pending_deletions: Vec<(Range<Key>, Lsn)>,
1042 : pending_nblocks: i64,
1043 :
1044 : /// Metadata writes, indexed by key so that they can be read from not-yet-committed modifications
1045 : /// while ingesting subsequent records. See [`Self::is_data_key`] for the definition of 'metadata'.
1046 : pending_metadata_pages: HashMap<CompactKey, Vec<(Lsn, usize, Value)>>,
1047 :
1048 : /// Data writes, ready to be flushed into an ephemeral layer. See [`Self::is_data_key`] for
1049 : /// which keys are stored here.
1050 : pending_data_pages: Vec<(CompactKey, Lsn, usize, Value)>,
1051 :
1052 : // Sometimes during ingest, for example when extending a relation, we would like to write a zero page. However,
1053 : // if we encounter a write from postgres in the same wal record, we will drop this entry.
1054 : //
1055 : // Unlike other 'pending' fields, this does not last until the next call to commit(): it is flushed
1056 : // at the end of each WAL record, and all these writes are implicitly at LSN Self::lsn.
1057 : pending_zero_data_pages: HashSet<CompactKey>,
1058 :
1059 : /// For special "directory" keys that store key-value maps, track the size of the map
1060 : /// if it was updated in this modification.
1061 : pending_directory_entries: Vec<(DirectoryKind, usize)>,
1062 :
1063 : /// An **approximation** of how large our EphemeralFile write will be when committed.
1064 : pending_bytes: usize,
1065 : }
1066 :
1067 : impl<'a> DatadirModification<'a> {
1068 : // When a DatadirModification is committed, we do a monolithic serialization of all its contents. WAL records can
1069 : // contain multiple pages, so the pageserver's record-based batch size isn't sufficient to bound this allocation: we
1070 : // additionally specify a limit on how much payload a DatadirModification may contain before it should be committed.
1071 : pub(crate) const MAX_PENDING_BYTES: usize = 8 * 1024 * 1024;
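// Editor's sketch of how an ingest loop might honor this bound (names and
// call site are illustrative; approx_pending_bytes is defined below):
//
//     if modification.approx_pending_bytes() >= DatadirModification::MAX_PENDING_BYTES {
//         modification.commit(ctx).await?; // flush before the batch grows further
//     }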
1072 :
1073 : /// Get the current lsn
1074 1254168 : pub(crate) fn get_lsn(&self) -> Lsn {
1075 1254168 : self.lsn
1076 1254168 : }
1077 :
1078 0 : pub(crate) fn approx_pending_bytes(&self) -> usize {
1079 0 : self.pending_bytes
1080 0 : }
1081 :
1082 0 : pub(crate) fn has_dirty_data_pages(&self) -> bool {
1083 0 : (!self.pending_data_pages.is_empty()) || (!self.pending_zero_data_pages.is_empty())
1084 0 : }
1085 :
1086 : /// Set the current lsn
1087 437574 : pub(crate) fn set_lsn(&mut self, lsn: Lsn) -> anyhow::Result<()> {
1088 437574 : ensure!(
1089 437574 : lsn >= self.lsn,
1090 0 : "setting an older lsn {} than {} is not allowed",
1091 : lsn,
1092 : self.lsn
1093 : );
1094 :
1095 : // If we are advancing LSN, then state from previous wal record should have been flushed.
1096 437574 : assert!(self.pending_zero_data_pages.is_empty());
1097 :
1098 437574 : if lsn > self.lsn {
1099 437574 : self.pending_lsns.push(self.lsn);
1100 437574 : self.lsn = lsn;
1101 437574 : }
1102 437574 : Ok(())
1103 437574 : }
1104 :
1105 : /// In this context, 'metadata' means keys that are only read by the pageserver internally, and 'data' means
1106 : /// keys that represent literal blocks that postgres can read. So data includes relation blocks and
1107 : /// SLRU blocks, which are read directly by postgres, and everything else is considered metadata.
1108 : ///
1109 : /// The distinction is important because data keys are handled on a fast path where dirty writes are
1110 : /// not readable until this modification is committed, whereas metadata keys are visible for read
1111 : /// via [`Self::get`] as soon as their record has been ingested.
1112 2988252 : fn is_data_key(key: &Key) -> bool {
1113 2988252 : key.is_rel_block_key() || key.is_slru_block_key()
1114 2988252 : }
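// For illustration (editor's note, using the key helpers imported at the
// top of this file): relation and SLRU block keys are "data"; everything
// else, such as size and directory keys, is "metadata" and stays readable
// via [`Self::get`] before commit.
//
//     debug_assert!(DatadirModification::is_data_key(&rel_block_to_key(rel, 0)));
//     debug_assert!(!DatadirModification::is_data_key(&rel_size_to_key(rel)));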
1115 :
1116 : /// Initialize a completely new repository.
1117 : ///
1118 : /// This inserts the directory metadata entries that are assumed to
1119 : /// always exist.
1120 522 : pub fn init_empty(&mut self) -> anyhow::Result<()> {
1121 522 : let buf = DbDirectory::ser(&DbDirectory {
1122 522 : dbdirs: HashMap::new(),
1123 522 : })?;
1124 522 : self.pending_directory_entries.push((DirectoryKind::Db, 0));
1125 522 : self.put(DBDIR_KEY, Value::Image(buf.into()));
1126 522 :
1127 522 : // Create AuxFilesDirectory
1128 522 : self.init_aux_dir()?;
1129 :
1130 522 : let buf = TwoPhaseDirectory::ser(&TwoPhaseDirectory {
1131 522 : xids: HashSet::new(),
1132 522 : })?;
1133 522 : self.pending_directory_entries
1134 522 : .push((DirectoryKind::TwoPhase, 0));
1135 522 : self.put(TWOPHASEDIR_KEY, Value::Image(buf.into()));
1136 :
1137 522 : let buf: Bytes = SlruSegmentDirectory::ser(&SlruSegmentDirectory::default())?.into();
1138 522 : let empty_dir = Value::Image(buf);
1139 522 : self.put(slru_dir_to_key(SlruKind::Clog), empty_dir.clone());
1140 522 : self.pending_directory_entries
1141 522 : .push((DirectoryKind::SlruSegment(SlruKind::Clog), 0));
1142 522 : self.put(
1143 522 : slru_dir_to_key(SlruKind::MultiXactMembers),
1144 522 : empty_dir.clone(),
1145 522 : );
1146 522 : self.pending_directory_entries
1147 522 : .push((DirectoryKind::SlruSegment(SlruKind::Clog), 0));
1148 522 : self.put(slru_dir_to_key(SlruKind::MultiXactOffsets), empty_dir);
1149 522 : self.pending_directory_entries
1150 522 : .push((DirectoryKind::SlruSegment(SlruKind::MultiXactOffsets), 0));
1151 522 :
1152 522 : Ok(())
1153 522 : }
1154 :
1155 : #[cfg(test)]
1156 516 : pub fn init_empty_test_timeline(&mut self) -> anyhow::Result<()> {
1157 516 : self.init_empty()?;
1158 516 : self.put_control_file(bytes::Bytes::from_static(
1159 516 : b"control_file contents do not matter",
1160 516 : ))
1161 516 : .context("put_control_file")?;
1162 516 : self.put_checkpoint(bytes::Bytes::from_static(
1163 516 : b"checkpoint_file contents do not matter",
1164 516 : ))
1165 516 : .context("put_checkpoint_file")?;
1166 516 : Ok(())
1167 516 : }
1168 :
1169 : /// Put a new page version that can be constructed from a WAL record
1170 : ///
1171 : /// NOTE: this will *not* implicitly extend the relation, if the page is beyond the
1172 : /// current end-of-file. It's up to the caller to check that the relation size
1173 : /// matches the blocks inserted!
1174 436890 : pub fn put_rel_wal_record(
1175 436890 : &mut self,
1176 436890 : rel: RelTag,
1177 436890 : blknum: BlockNumber,
1178 436890 : rec: NeonWalRecord,
1179 436890 : ) -> anyhow::Result<()> {
1180 436890 : anyhow::ensure!(rel.relnode != 0, RelationError::InvalidRelnode);
1181 436890 : self.put(rel_block_to_key(rel, blknum), Value::WalRecord(rec));
1182 436890 : Ok(())
1183 436890 : }
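// Editor's sketch of the caller's obligation from the NOTE above: when a
// record writes past the current end of the relation, the ingest path must
// also extend the size explicitly (see put_rel_extend below); `old_nblocks`
// is assumed to be known at the call site.
//
//     modification.put_rel_wal_record(rel, blknum, rec)?;
//     if blknum >= old_nblocks {
//         modification.put_rel_extend(rel, blknum + 1, ctx).await?;
//     }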
1184 :
1185 : // Same, but for an SLRU.
1186 24 : pub fn put_slru_wal_record(
1187 24 : &mut self,
1188 24 : kind: SlruKind,
1189 24 : segno: u32,
1190 24 : blknum: BlockNumber,
1191 24 : rec: NeonWalRecord,
1192 24 : ) -> anyhow::Result<()> {
1193 24 : self.put(
1194 24 : slru_block_to_key(kind, segno, blknum),
1195 24 : Value::WalRecord(rec),
1196 24 : );
1197 24 : Ok(())
1198 24 : }
1199 :
1200 : /// Like put_rel_wal_record, but with a ready-made image of the page.
1201 833598 : pub fn put_rel_page_image(
1202 833598 : &mut self,
1203 833598 : rel: RelTag,
1204 833598 : blknum: BlockNumber,
1205 833598 : img: Bytes,
1206 833598 : ) -> anyhow::Result<()> {
1207 833598 : anyhow::ensure!(rel.relnode != 0, RelationError::InvalidRelnode);
1208 833598 : self.put(rel_block_to_key(rel, blknum), Value::Image(img));
1209 833598 : Ok(())
1210 833598 : }
1211 :
1212 18 : pub fn put_slru_page_image(
1213 18 : &mut self,
1214 18 : kind: SlruKind,
1215 18 : segno: u32,
1216 18 : blknum: BlockNumber,
1217 18 : img: Bytes,
1218 18 : ) -> anyhow::Result<()> {
1219 18 : self.put(slru_block_to_key(kind, segno, blknum), Value::Image(img));
1220 18 : Ok(())
1221 18 : }
1222 :
1223 8994 : pub(crate) fn put_rel_page_image_zero(&mut self, rel: RelTag, blknum: BlockNumber) {
1224 8994 : self.pending_zero_data_pages
1225 8994 : .insert(rel_block_to_key(rel, blknum).to_compact());
1226 8994 : self.pending_bytes += ZERO_PAGE.len();
1227 8994 : }
1228 :
1229 0 : pub(crate) fn put_slru_page_image_zero(
1230 0 : &mut self,
1231 0 : kind: SlruKind,
1232 0 : segno: u32,
1233 0 : blknum: BlockNumber,
1234 0 : ) {
1235 0 : self.pending_zero_data_pages
1236 0 : .insert(slru_block_to_key(kind, segno, blknum).to_compact());
1237 0 : self.pending_bytes += ZERO_PAGE.len();
1238 0 : }
1239 :
1240 : /// Call this at the end of each WAL record.
1241 437592 : pub(crate) fn on_record_end(&mut self) {
1242 437592 : let pending_zero_data_pages = std::mem::take(&mut self.pending_zero_data_pages);
1243 446586 : for key in pending_zero_data_pages {
1244 8994 : self.put_data(key, Value::Image(ZERO_PAGE.clone()));
1245 8994 : }
1246 437592 : }
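// Editor's sketch of the zero-page flow handled above: extending a relation
// queues all-zeros pages, which materialize as ZERO_PAGE images when the
// record ends, unless the same record also wrote real contents for the block.
//
//     modification.put_rel_page_image_zero(rel, gap_blkno);
//     // a real write to the same key within this record supersedes the zero page
//     modification.on_record_end(); // flushes any remaining queued zero pages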
1247 :
1248 : /// Store a relmapper file (pg_filenode.map) in the repository
1249 48 : pub async fn put_relmap_file(
1250 48 : &mut self,
1251 48 : spcnode: Oid,
1252 48 : dbnode: Oid,
1253 48 : img: Bytes,
1254 48 : ctx: &RequestContext,
1255 48 : ) -> anyhow::Result<()> {
1256 : // Add it to the directory (if it doesn't exist already)
1257 48 : let buf = self.get(DBDIR_KEY, ctx).await?;
1258 48 : let mut dbdir = DbDirectory::des(&buf)?;
1259 :
1260 48 : let r = dbdir.dbdirs.insert((spcnode, dbnode), true);
1261 48 : if r.is_none() || r == Some(false) {
1262 : // The dbdir entry didn't exist, or it contained a
1263 : // 'false'. The 'insert' call already updated it with
1264 : // 'true', now write the updated 'dbdirs' map back.
1265 48 : let buf = DbDirectory::ser(&dbdir)?;
1266 48 : self.put(DBDIR_KEY, Value::Image(buf.into()));
1267 48 :
1268 48 : // Create AuxFilesDirectory as well
1269 48 : self.init_aux_dir()?;
1270 0 : }
1271 48 : if r.is_none() {
1272 : // Create RelDirectory
1273 24 : let buf = RelDirectory::ser(&RelDirectory {
1274 24 : rels: HashSet::new(),
1275 24 : })?;
1276 24 : self.pending_directory_entries.push((DirectoryKind::Rel, 0));
1277 24 : self.put(
1278 24 : rel_dir_to_key(spcnode, dbnode),
1279 24 : Value::Image(Bytes::from(buf)),
1280 24 : );
1281 24 : }
1282 :
1283 48 : self.put(relmap_file_key(spcnode, dbnode), Value::Image(img));
1284 48 : Ok(())
1285 48 : }
1286 :
1287 0 : pub async fn put_twophase_file(
1288 0 : &mut self,
1289 0 : xid: TransactionId,
1290 0 : img: Bytes,
1291 0 : ctx: &RequestContext,
1292 0 : ) -> anyhow::Result<()> {
1293 : // Add it to the directory entry
1294 0 : let buf = self.get(TWOPHASEDIR_KEY, ctx).await?;
1295 0 : let mut dir = TwoPhaseDirectory::des(&buf)?;
1296 0 : if !dir.xids.insert(xid) {
1297 0 : anyhow::bail!("twophase file for xid {} already exists", xid);
1298 0 : }
1299 0 : self.pending_directory_entries
1300 0 : .push((DirectoryKind::TwoPhase, dir.xids.len()));
1301 0 : self.put(
1302 0 : TWOPHASEDIR_KEY,
1303 0 : Value::Image(Bytes::from(TwoPhaseDirectory::ser(&dir)?)),
1304 : );
1305 :
1306 0 : self.put(twophase_file_key(xid), Value::Image(img));
1307 0 : Ok(())
1308 0 : }
1309 :
1310 0 : pub async fn set_replorigin(
1311 0 : &mut self,
1312 0 : origin_id: RepOriginId,
1313 0 : origin_lsn: Lsn,
1314 0 : ) -> anyhow::Result<()> {
1315 0 : let key = repl_origin_key(origin_id);
1316 0 : self.put(key, Value::Image(origin_lsn.ser().unwrap().into()));
1317 0 : Ok(())
1318 0 : }
1319 :
1320 0 : pub async fn drop_replorigin(&mut self, origin_id: RepOriginId) -> anyhow::Result<()> {
1321 0 : self.set_replorigin(origin_id, Lsn::INVALID).await
1322 0 : }
1323 :
1324 522 : pub fn put_control_file(&mut self, img: Bytes) -> anyhow::Result<()> {
1325 522 : self.put(CONTROLFILE_KEY, Value::Image(img));
1326 522 : Ok(())
1327 522 : }
1328 :
1329 564 : pub fn put_checkpoint(&mut self, img: Bytes) -> anyhow::Result<()> {
1330 564 : self.put(CHECKPOINT_KEY, Value::Image(img));
1331 564 : Ok(())
1332 564 : }
1333 :
1334 0 : pub async fn drop_dbdir(
1335 0 : &mut self,
1336 0 : spcnode: Oid,
1337 0 : dbnode: Oid,
1338 0 : ctx: &RequestContext,
1339 0 : ) -> anyhow::Result<()> {
1340 0 : let total_blocks = self
1341 0 : .tline
1342 0 : .get_db_size(spcnode, dbnode, Version::Modified(self), ctx)
1343 0 : .await?;
1344 :
1345 : // Remove entry from dbdir
1346 0 : let buf = self.get(DBDIR_KEY, ctx).await?;
1347 0 : let mut dir = DbDirectory::des(&buf)?;
1348 0 : if dir.dbdirs.remove(&(spcnode, dbnode)).is_some() {
1349 0 : let buf = DbDirectory::ser(&dir)?;
1350 0 : self.pending_directory_entries
1351 0 : .push((DirectoryKind::Db, dir.dbdirs.len()));
1352 0 : self.put(DBDIR_KEY, Value::Image(buf.into()));
1353 : } else {
1354 0 : warn!(
1355 0 : "dropped dbdir for spcnode {} dbnode {} did not exist in db directory",
1356 : spcnode, dbnode
1357 : );
1358 : }
1359 :
1360 : // Update logical database size.
1361 0 : self.pending_nblocks -= total_blocks as i64;
1362 0 :
1363 0 : // Delete all relations and metadata files for the spcnode/dnode
1364 0 : self.delete(dbdir_key_range(spcnode, dbnode));
1365 0 : Ok(())
1366 0 : }
1367 :
1368 : /// Create a relation fork.
1369 : ///
1370 : /// 'nblocks' is the initial size.
1371 5760 : pub async fn put_rel_creation(
1372 5760 : &mut self,
1373 5760 : rel: RelTag,
1374 5760 : nblocks: BlockNumber,
1375 5760 : ctx: &RequestContext,
1376 5760 : ) -> Result<(), RelationError> {
1377 5760 : if rel.relnode == 0 {
1378 0 : return Err(RelationError::InvalidRelnode);
1379 5760 : }
1380 : // It's possible that this is the first rel for this db in this
1381 : // tablespace. Create the reldir entry for it if so.
1382 5760 : let mut dbdir = DbDirectory::des(&self.get(DBDIR_KEY, ctx).await.context("read db")?)
1383 5760 : .context("deserialize db")?;
1384 5760 : let rel_dir_key = rel_dir_to_key(rel.spcnode, rel.dbnode);
1385 5760 : let mut rel_dir =
1386 5760 : if let hash_map::Entry::Vacant(e) = dbdir.dbdirs.entry((rel.spcnode, rel.dbnode)) {
1387 : // Didn't exist. Update dbdir
1388 24 : e.insert(false);
1389 24 : let buf = DbDirectory::ser(&dbdir).context("serialize db")?;
1390 24 : self.pending_directory_entries
1391 24 : .push((DirectoryKind::Db, dbdir.dbdirs.len()));
1392 24 : self.put(DBDIR_KEY, Value::Image(buf.into()));
1393 24 :
1394 24 : // and create the RelDirectory
1395 24 : RelDirectory::default()
1396 : } else {
1397 : // reldir already exists, fetch it
1398 5736 : RelDirectory::des(&self.get(rel_dir_key, ctx).await.context("read db")?)
1399 5736 : .context("deserialize db")?
1400 : };
1401 :
1402 : // Add the new relation to the rel directory entry, and write it back
1403 5760 : if !rel_dir.rels.insert((rel.relnode, rel.forknum)) {
1404 0 : return Err(RelationError::AlreadyExists);
1405 5760 : }
1406 5760 :
1407 5760 : self.pending_directory_entries
1408 5760 : .push((DirectoryKind::Rel, rel_dir.rels.len()));
1409 5760 :
1410 5760 : self.put(
1411 5760 : rel_dir_key,
1412 5760 : Value::Image(Bytes::from(
1413 5760 : RelDirectory::ser(&rel_dir).context("serialize")?,
1414 : )),
1415 : );
1416 :
1417 : // Put size
1418 5760 : let size_key = rel_size_to_key(rel);
1419 5760 : let buf = nblocks.to_le_bytes();
1420 5760 : self.put(size_key, Value::Image(Bytes::from(buf.to_vec())));
1421 5760 :
1422 5760 : self.pending_nblocks += nblocks as i64;
1423 5760 :
1424 5760 : // Update relation size cache
1425 5760 : self.tline.set_cached_rel_size(rel, self.lsn, nblocks);
1426 5760 :
1427 5760 : // Even if nblocks > 0, we don't insert any actual blocks here. That's up to the
1428 5760 : // caller.
1429 5760 : Ok(())
1430 5760 : }
1431 :
1432 : /// Truncate relation
1433 18036 : pub async fn put_rel_truncation(
1434 18036 : &mut self,
1435 18036 : rel: RelTag,
1436 18036 : nblocks: BlockNumber,
1437 18036 : ctx: &RequestContext,
1438 18036 : ) -> anyhow::Result<()> {
1439 18036 : anyhow::ensure!(rel.relnode != 0, RelationError::InvalidRelnode);
1440 18036 : if self
1441 18036 : .tline
1442 18036 : .get_rel_exists(rel, Version::Modified(self), ctx)
1443 0 : .await?
1444 : {
1445 18036 : let size_key = rel_size_to_key(rel);
1446 : // Fetch the old size first
1447 18036 : let old_size = self.get(size_key, ctx).await?.get_u32_le();
1448 18036 :
1449 18036 : // Update the entry with the new size.
1450 18036 : let buf = nblocks.to_le_bytes();
1451 18036 : self.put(size_key, Value::Image(Bytes::from(buf.to_vec())));
1452 18036 :
1453 18036 : // Update relation size cache
1454 18036 : self.tline.set_cached_rel_size(rel, self.lsn, nblocks);
1458 18036 :
1459 18036 : // Update logical database size.
1460 18036 : self.pending_nblocks -= old_size as i64 - nblocks as i64;
1461 0 : }
1462 18036 : Ok(())
1463 18036 : }
1464 :
1465 : /// Extend relation
1466 : /// If the new size is smaller, do nothing.
1467 830040 : pub async fn put_rel_extend(
1468 830040 : &mut self,
1469 830040 : rel: RelTag,
1470 830040 : nblocks: BlockNumber,
1471 830040 : ctx: &RequestContext,
1472 830040 : ) -> anyhow::Result<()> {
1473 830040 : anyhow::ensure!(rel.relnode != 0, RelationError::InvalidRelnode);
1474 :
1475 : // Put size
1476 830040 : let size_key = rel_size_to_key(rel);
1477 830040 : let old_size = self.get(size_key, ctx).await?.get_u32_le();
1478 830040 :
1479 830040 : // only extend relation here. never decrease the size
1480 830040 : if nblocks > old_size {
1481 824364 : let buf = nblocks.to_le_bytes();
1482 824364 : self.put(size_key, Value::Image(Bytes::from(buf.to_vec())));
1483 824364 :
1484 824364 : // Update relation size cache
1485 824364 : self.tline.set_cached_rel_size(rel, self.lsn, nblocks);
1486 824364 :
1487 824364 : self.pending_nblocks += nblocks as i64 - old_size as i64;
1488 824364 : }
1489 830040 : Ok(())
1490 830040 : }
1491 :
1492 : /// Drop a relation.
1493 6 : pub async fn put_rel_drop(&mut self, rel: RelTag, ctx: &RequestContext) -> anyhow::Result<()> {
1494 6 : anyhow::ensure!(rel.relnode != 0, RelationError::InvalidRelnode);
1495 :
1496 : // Remove it from the directory entry
1497 6 : let dir_key = rel_dir_to_key(rel.spcnode, rel.dbnode);
1498 6 : let buf = self.get(dir_key, ctx).await?;
1499 6 : let mut dir = RelDirectory::des(&buf)?;
1500 :
1501 6 : self.pending_directory_entries
1502 6 : .push((DirectoryKind::Rel, dir.rels.len()));
1503 6 :
1504 6 : if dir.rels.remove(&(rel.relnode, rel.forknum)) {
1505 6 : self.put(dir_key, Value::Image(Bytes::from(RelDirectory::ser(&dir)?)));
1506 : } else {
1507 0 : warn!("dropped rel {} did not exist in rel directory", rel);
1508 : }
1509 :
1510 : // update logical size
1511 6 : let size_key = rel_size_to_key(rel);
1512 6 : let old_size = self.get(size_key, ctx).await?.get_u32_le();
1513 6 : self.pending_nblocks -= old_size as i64;
1514 6 :
1515 6 : // Remove entry from relation size cache
1516 6 : self.tline.remove_cached_rel_size(&rel);
1517 6 :
1518 6 : // Delete size entry, as well as all blocks
1519 6 : self.delete(rel_key_range(rel));
1520 6 :
1521 6 : Ok(())
1522 6 : }
1523 :
1524 18 : pub async fn put_slru_segment_creation(
1525 18 : &mut self,
1526 18 : kind: SlruKind,
1527 18 : segno: u32,
1528 18 : nblocks: BlockNumber,
1529 18 : ctx: &RequestContext,
1530 18 : ) -> anyhow::Result<()> {
1531 18 : // Add it to the directory entry
1532 18 : let dir_key = slru_dir_to_key(kind);
1533 18 : let buf = self.get(dir_key, ctx).await?;
1534 18 : let mut dir = SlruSegmentDirectory::des(&buf)?;
1535 :
1536 18 : if !dir.segments.insert(segno) {
1537 0 : anyhow::bail!("slru segment {kind:?}/{segno} already exists");
1538 18 : }
1539 18 : self.pending_directory_entries
1540 18 : .push((DirectoryKind::SlruSegment(kind), dir.segments.len()));
1541 18 : self.put(
1542 18 : dir_key,
1543 18 : Value::Image(Bytes::from(SlruSegmentDirectory::ser(&dir)?)),
1544 : );
1545 :
1546 : // Put size
1547 18 : let size_key = slru_segment_size_to_key(kind, segno);
1548 18 : let buf = nblocks.to_le_bytes();
1549 18 : self.put(size_key, Value::Image(Bytes::from(buf.to_vec())));
1550 18 :
1551 18 : // even if nblocks > 0, we don't insert any actual blocks here
1552 18 :
1553 18 : Ok(())
1554 18 : }
1555 :
1556 : /// Extend SLRU segment
1557 0 : pub fn put_slru_extend(
1558 0 : &mut self,
1559 0 : kind: SlruKind,
1560 0 : segno: u32,
1561 0 : nblocks: BlockNumber,
1562 0 : ) -> anyhow::Result<()> {
1563 0 : // Put size
1564 0 : let size_key = slru_segment_size_to_key(kind, segno);
1565 0 : let buf = nblocks.to_le_bytes();
1566 0 : self.put(size_key, Value::Image(Bytes::from(buf.to_vec())));
1567 0 : Ok(())
1568 0 : }
1569 :
1570 : /// Drop an SLRU segment. This is used when SLRU truncation removes whole segments.
1571 0 : pub async fn drop_slru_segment(
1572 0 : &mut self,
1573 0 : kind: SlruKind,
1574 0 : segno: u32,
1575 0 : ctx: &RequestContext,
1576 0 : ) -> anyhow::Result<()> {
1577 0 : // Remove it from the directory entry
1578 0 : let dir_key = slru_dir_to_key(kind);
1579 0 : let buf = self.get(dir_key, ctx).await?;
1580 0 : let mut dir = SlruSegmentDirectory::des(&buf)?;
1581 :
1582 0 : if !dir.segments.remove(&segno) {
1583 0 : warn!("slru segment {:?}/{} does not exist", kind, segno);
1584 0 : }
1585 0 : self.pending_directory_entries
1586 0 : .push((DirectoryKind::SlruSegment(kind), dir.segments.len()));
1587 0 : self.put(
1588 0 : dir_key,
1589 0 : Value::Image(Bytes::from(SlruSegmentDirectory::ser(&dir)?)),
1590 : );
1591 :
1592 : // Delete size entry, as well as all blocks
1593 0 : self.delete(slru_segment_key_range(kind, segno));
1594 0 :
1595 0 : Ok(())
1596 0 : }
1597 :
1598 : /// Drop a relmapper file (pg_filenode.map)
1599 0 : pub fn drop_relmap_file(&mut self, _spcnode: Oid, _dbnode: Oid) -> anyhow::Result<()> {
1600 0 : // TODO
1601 0 : Ok(())
1602 0 : }
1603 :
1604 : /// Drop the two-phase state file for the given transaction.
1605 0 : pub async fn drop_twophase_file(
1606 0 : &mut self,
1607 0 : xid: TransactionId,
1608 0 : ctx: &RequestContext,
1609 0 : ) -> anyhow::Result<()> {
1610 : // Remove it from the directory entry
1611 0 : let buf = self.get(TWOPHASEDIR_KEY, ctx).await?;
1612 0 : let mut dir = TwoPhaseDirectory::des(&buf)?;
1613 :
1614 0 : if !dir.xids.remove(&xid) {
1615 0 : warn!("twophase file for xid {} does not exist", xid);
1616 0 : }
1617 0 : self.pending_directory_entries
1618 0 : .push((DirectoryKind::TwoPhase, dir.xids.len()));
1619 0 : self.put(
1620 0 : TWOPHASEDIR_KEY,
1621 0 : Value::Image(Bytes::from(TwoPhaseDirectory::ser(&dir)?)),
1622 : );
1623 :
1624 : // Delete it
1625 0 : self.delete(twophase_key_range(xid));
1626 0 :
1627 0 : Ok(())
1628 0 : }
1629 :
1630 570 : pub fn init_aux_dir(&mut self) -> anyhow::Result<()> {
1631 570 : if let AuxFilePolicy::V2 = self.tline.get_switch_aux_file_policy() {
1632 558 : return Ok(());
1633 12 : }
1634 12 : let buf = AuxFilesDirectory::ser(&AuxFilesDirectory {
1635 12 : files: HashMap::new(),
1636 12 : })?;
1637 12 : self.pending_directory_entries
1638 12 : .push((DirectoryKind::AuxFiles, 0));
1639 12 : self.put(AUX_FILES_KEY, Value::Image(Bytes::from(buf)));
1640 12 : Ok(())
1641 570 : }
1642 :
1643 90 : pub async fn put_file(
1644 90 : &mut self,
1645 90 : path: &str,
1646 90 : content: &[u8],
1647 90 : ctx: &RequestContext,
1648 90 : ) -> anyhow::Result<()> {
1649 90 : let switch_policy = self.tline.get_switch_aux_file_policy();
1650 :
1651 90 : let policy = {
1652 90 : let current_policy = self.tline.last_aux_file_policy.load();
1653 : // Allowed switch path:
1654 : // * no aux files -> v1/v2/cross-validation
1655 : // * cross-validation->v2
1656 :
1657 90 : let current_policy = if current_policy.is_none() {
1658 : // This path will only be hit once per tenant: we will decide the final policy in this code block.
1659 : // The next call to `put_file` will always have `last_aux_file_policy != None`.
1660 36 : let lsn = Lsn::max(self.tline.get_last_record_lsn(), self.lsn);
1661 36 : let aux_files_key_v1 = self.tline.list_aux_files_v1(lsn, ctx).await?;
1662 36 : if aux_files_key_v1.is_empty() {
1663 30 : None
1664 : } else {
1665 6 : warn!("this timeline is using deprecated aux file policy V1 (detected existing v1 files)");
1666 6 : self.tline.do_switch_aux_policy(AuxFilePolicy::V1)?;
1667 6 : Some(AuxFilePolicy::V1)
1668 : }
1669 : } else {
1670 54 : current_policy
1671 : };
1672 :
1673 90 : if AuxFilePolicy::is_valid_migration_path(current_policy, switch_policy) {
1674 36 : self.tline.do_switch_aux_policy(switch_policy)?;
1675 36 : info!(current=?current_policy, next=?switch_policy, "switching aux file policy");
1676 36 : switch_policy
1677 : } else {
1678 : // This branch handles invalid migration paths, and the case where switch_policy == current_policy.
1679 : // In practice, because the migration path always allows unspecified -> *, this unwrap_or will never be hit.
1680 54 : current_policy.unwrap_or(AuxFilePolicy::default_tenant_config())
1681 : }
1682 : };
1683 :
1684 90 : if let AuxFilePolicy::V2 | AuxFilePolicy::CrossValidation = policy {
1685 78 : let key = aux_file::encode_aux_file_key(path);
1686 : // retrieve the key from the engine
1687 78 : let old_val = match self.get(key, ctx).await {
1688 18 : Ok(val) => Some(val),
1689 60 : Err(PageReconstructError::MissingKey(_)) => None,
1690 0 : Err(e) => return Err(e.into()),
1691 : };
1692 78 : let files: Vec<(&str, &[u8])> = if let Some(ref old_val) = old_val {
1693 18 : aux_file::decode_file_value(old_val)?
1694 : } else {
1695 60 : Vec::new()
1696 : };
1697 78 : let mut other_files = Vec::with_capacity(files.len());
1698 78 : let mut modifying_file = None;
1699 96 : for file @ (p, content) in files {
1700 18 : if path == p {
1701 18 : assert!(
1702 18 : modifying_file.is_none(),
1703 0 : "duplicated entries found for {}",
1704 : path
1705 : );
1706 18 : modifying_file = Some(content);
1707 0 : } else {
1708 0 : other_files.push(file);
1709 0 : }
1710 : }
1711 78 : let mut new_files = other_files;
1712 78 : match (modifying_file, content.is_empty()) {
1713 12 : (Some(old_content), false) => {
1714 12 : self.tline
1715 12 : .aux_file_size_estimator
1716 12 : .on_update(old_content.len(), content.len());
1717 12 : new_files.push((path, content));
1718 12 : }
1719 6 : (Some(old_content), true) => {
1720 6 : self.tline
1721 6 : .aux_file_size_estimator
1722 6 : .on_remove(old_content.len());
1723 6 : // not adding the file key to the final `new_files` vec.
1724 6 : }
1725 60 : (None, false) => {
1726 60 : self.tline.aux_file_size_estimator.on_add(content.len());
1727 60 : new_files.push((path, content));
1728 60 : }
1729 0 : (None, true) => warn!("removing non-existing aux file: {}", path),
1730 : }
1731 78 : let new_val = aux_file::encode_file_value(&new_files)?;
1732 78 : self.put(key, Value::Image(new_val.into()));
1733 12 : }
1734 :
1735 90 : if let AuxFilePolicy::V1 | AuxFilePolicy::CrossValidation = policy {
1736 18 : let file_path = path.to_string();
1737 18 : let content = if content.is_empty() {
1738 0 : None
1739 : } else {
1740 18 : Some(Bytes::copy_from_slice(content))
1741 : };
1742 :
1743 : let n_files;
1744 18 : let mut aux_files = self.tline.aux_files.lock().await;
1745 18 : if let Some(mut dir) = aux_files.dir.take() {
1746 : // We already updated aux files in `self`: emit a delta and update our latest value.
1747 0 : dir.upsert(file_path.clone(), content.clone());
1748 0 : n_files = dir.files.len();
1749 0 : if aux_files.n_deltas == MAX_AUX_FILE_DELTAS {
1750 0 : self.put(
1751 0 : AUX_FILES_KEY,
1752 0 : Value::Image(Bytes::from(
1753 0 : AuxFilesDirectory::ser(&dir).context("serialize")?,
1754 : )),
1755 : );
1756 0 : aux_files.n_deltas = 0;
1757 0 : } else {
1758 0 : self.put(
1759 0 : AUX_FILES_KEY,
1760 0 : Value::WalRecord(NeonWalRecord::AuxFile { file_path, content }),
1761 0 : );
1762 0 : aux_files.n_deltas += 1;
1763 0 : }
1764 0 : aux_files.dir = Some(dir);
1765 : } else {
1766 : // Check if the AUX_FILES_KEY is initialized
1767 18 : match self.get(AUX_FILES_KEY, ctx).await {
1768 18 : Ok(dir_bytes) => {
1769 18 : let mut dir = AuxFilesDirectory::des(&dir_bytes)?;
1770 : // Key is already set, we may append a delta
1771 18 : self.put(
1772 18 : AUX_FILES_KEY,
1773 18 : Value::WalRecord(NeonWalRecord::AuxFile {
1774 18 : file_path: file_path.clone(),
1775 18 : content: content.clone(),
1776 18 : }),
1777 18 : );
1778 18 : dir.upsert(file_path, content);
1779 18 : n_files = dir.files.len();
1780 18 : aux_files.dir = Some(dir);
1781 : }
1782 : Err(
1783 0 : e @ (PageReconstructError::Cancelled
1784 0 : | PageReconstructError::AncestorLsnTimeout(_)),
1785 0 : ) => {
1786 0 : // Important that we do not interpret a shutdown error as "not found" and thereby
1787 0 : // reset the map.
1788 0 : return Err(e.into());
1789 : }
1790 : // Note: we added missing key error variant in https://github.com/neondatabase/neon/pull/7393 but
1791 : // the original code assumes all other errors are missing keys. Therefore, we keep the code path
1792 : // the same for now, though in theory, we should only match the `MissingKey` variant.
1793 : Err(
1794 0 : e @ (PageReconstructError::Other(_)
1795 : | PageReconstructError::WalRedo(_)
1796 : | PageReconstructError::MissingKey(_)),
1797 : ) => {
1798 : // Key is missing, we must insert an image as the basis for subsequent deltas.
1799 :
1800 0 : if !matches!(e, PageReconstructError::MissingKey(_)) {
1801 0 : let e = utils::error::report_compact_sources(&e);
1802 0 : tracing::warn!("treating error as if it was a missing key: {}", e);
1803 0 : }
1804 :
1805 0 : let mut dir = AuxFilesDirectory {
1806 0 : files: HashMap::new(),
1807 0 : };
1808 0 : dir.upsert(file_path, content);
1809 0 : self.put(
1810 0 : AUX_FILES_KEY,
1811 0 : Value::Image(Bytes::from(
1812 0 : AuxFilesDirectory::ser(&dir).context("serialize")?,
1813 : )),
1814 : );
1815 0 : n_files = 1;
1816 0 : aux_files.dir = Some(dir);
1817 : }
1818 : }
1819 : }
1820 :
1821 18 : self.pending_directory_entries
1822 18 : .push((DirectoryKind::AuxFiles, n_files));
1823 72 : }
1824 :
1825 90 : Ok(())
1826 90 : }
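// For aux v2, `put_file` above is a read-modify-write over a single encoded
// key: load the existing (path, content) list, drop any entry for `path`, and
// re-add it unless the new content is empty (empty content means deletion).
// A standalone sketch of that merge step, with simplified types and without
// the size-estimator accounting:

fn merge_aux_entry(
    files: Vec<(String, Vec<u8>)>,
    path: &str,
    content: &[u8],
) -> Vec<(String, Vec<u8>)> {
    let mut new_files: Vec<(String, Vec<u8>)> =
        files.into_iter().filter(|(p, _)| p != path).collect();
    if !content.is_empty() {
        // Non-empty content is an insert or overwrite.
        new_files.push((path.to_string(), content.to_vec()));
    }
    new_files
}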
1827 :
1828 : ///
1829 : /// Flush changes accumulated so far to the underlying repository.
1830 : ///
1831 : /// Usually, changes made in DatadirModification are atomic, but this allows
1832 : /// you to flush them to the underlying repository before the final `commit`.
1833 : /// That frees up the memory used to hold the pending changes.
1834 : ///
1835 : /// Currently only used during bulk import of a data directory. In that
1836 : /// context, breaking the atomicity is OK. If the import is interrupted, the
1837 : /// whole import fails and the timeline will be deleted anyway.
1838 : /// (Or to be precise, it will be left behind for debugging purposes and
1839 : /// ignored, see <https://github.com/neondatabase/neon/pull/1809>)
1840 : ///
1841 : /// Note: A consequence of flushing the pending operations is that they
1842 : /// won't be visible to subsequent operations until `commit`. The function
1843 : /// retains all the metadata, but data pages are flushed. That's again OK
1844 : /// for bulk import, where you are just loading data pages and won't try to
1845 : /// modify the same pages twice.
1846 5790 : pub(crate) async fn flush(&mut self, ctx: &RequestContext) -> anyhow::Result<()> {
1847 5790 : // Unless we have accumulated a decent amount of changes, it's not worth it
1848 5790 : // to scan through the pending_updates list.
1849 5790 : let pending_nblocks = self.pending_nblocks;
1850 5790 : if pending_nblocks < 10000 {
1851 5790 : return Ok(());
1852 0 : }
1853 :
1854 0 : let mut writer = self.tline.writer().await;
1855 :
1856 : // Flush relation and SLRU data blocks, keep metadata.
1857 0 : let pending_data_pages = std::mem::take(&mut self.pending_data_pages);
1858 0 :
1859 0 : // This bails out on first error without modifying pending_updates.
1860 0 : // That's Ok, cf this function's doc comment.
1861 0 : writer.put_batch(pending_data_pages, ctx).await?;
1862 0 : self.pending_bytes = 0;
1863 0 :
1864 0 : if pending_nblocks != 0 {
1865 0 : writer.update_current_logical_size(pending_nblocks * i64::from(BLCKSZ));
1866 0 : self.pending_nblocks = 0;
1867 0 : }
1868 :
1869 0 : for (kind, count) in std::mem::take(&mut self.pending_directory_entries) {
1870 0 : writer.update_directory_entries_count(kind, count as u64);
1871 0 : }
1872 :
1873 0 : Ok(())
1874 5790 : }
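// A standalone sketch of the flush contract described above: only the data
// pages are drained and written early; metadata pages stay pending so later
// WAL records in the same modification can still read them. The types below
// are simplified stand-ins, not the real writer API:

struct PendingSketch {
    data_pages: Vec<(u64, Vec<u8>)>,     // drained by flush
    metadata_pages: Vec<(u64, Vec<u8>)>, // retained until commit
}

fn flush_data_pages(pending: &mut PendingSketch, sink: &mut Vec<(u64, Vec<u8>)>) {
    // std::mem::take leaves an empty vec behind, mirroring pending_data_pages.
    sink.extend(std::mem::take(&mut pending.data_pages));
}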
1875 :
1876 : ///
1877 : /// Finish this atomic update, writing all the updated keys to the
1878 : /// underlying timeline.
1879 : /// All the modifications in this atomic update are stamped by the specified LSN.
1880 : ///
1881 2229228 : pub async fn commit(&mut self, ctx: &RequestContext) -> anyhow::Result<()> {
1882 2229228 : // Commit should never be called mid-wal-record
1883 2229228 : assert!(self.pending_zero_data_pages.is_empty());
1884 :
1885 2229228 : let mut writer = self.tline.writer().await;
1886 :
1887 2229228 : let pending_nblocks = self.pending_nblocks;
1888 2229228 : self.pending_nblocks = 0;
1889 2229228 :
1890 2229228 : // Ordering: the items in this batch do not need to be in any global order, but values for
1891 2229228 : // a particular Key must be in Lsn order relative to one another. InMemoryLayer relies on
1892 2229228 : // this to do efficient updates to its index.
1893 2229228 : let mut write_batch = std::mem::take(&mut self.pending_data_pages);
1894 2229228 :
1895 2229228 : write_batch.extend(
1896 2229228 : self.pending_metadata_pages
1897 2229228 : .drain()
1898 2229228 : .flat_map(|(key, values)| {
1899 821460 : values
1900 821460 : .into_iter()
1901 821460 : .map(move |(lsn, value_size, value)| (key, lsn, value_size, value))
1902 2229228 : }),
1903 2229228 : );
1904 2229228 :
1905 2229228 : if !write_batch.is_empty() {
1906 1242180 : writer.put_batch(write_batch, ctx).await?;
1907 987048 : }
1908 :
1909 2229228 : if !self.pending_deletions.is_empty() {
1910 6 : writer.delete_batch(&self.pending_deletions, ctx).await?;
1911 6 : self.pending_deletions.clear();
1912 2229222 : }
1913 :
1914 2229228 : self.pending_lsns.push(self.lsn);
1915 2666802 : for pending_lsn in self.pending_lsns.drain(..) {
1916 2666802 : // Ideally, we should be able to call writer.finish_write() only once
1917 2666802 : // with the highest LSN. However, the last_record_lsn variable in the
1918 2666802 : // timeline keeps track of the latest LSN and the immediate previous LSN
1919 2666802 : // so we need to record every LSN to not leave a gap between them.
1920 2666802 : writer.finish_write(pending_lsn);
1921 2666802 : }
1922 :
1923 2229228 : if pending_nblocks != 0 {
1924 811710 : writer.update_current_logical_size(pending_nblocks * i64::from(BLCKSZ));
1925 1417518 : }
1926 :
1927 2229228 : for (kind, count) in std::mem::take(&mut self.pending_directory_entries) {
1928 8472 : writer.update_directory_entries_count(kind, count as u64);
1929 8472 : }
1930 :
1931 2229228 : self.pending_bytes = 0;
1932 2229228 :
1933 2229228 : Ok(())
1934 2229228 : }
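// The batch built in `commit` must satisfy the invariant noted above: no
// global ordering is required, but entries for any one key must appear in
// non-decreasing LSN order. A standalone checker sketch over simplified
// (key, lsn) pairs, illustrative only:

fn batch_is_key_lsn_ordered(batch: &[(u64, u64)]) -> bool {
    let mut last_lsn: HashMap<u64, u64> = HashMap::new();
    batch.iter().all(|&(key, lsn)| {
        let ok = last_lsn.get(&key).map_or(true, |&prev| prev <= lsn);
        last_lsn.insert(key, lsn);
        ok
    })
}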
1935 :
1936 875112 : pub(crate) fn len(&self) -> usize {
1937 875112 : self.pending_metadata_pages.len()
1938 875112 : + self.pending_data_pages.len()
1939 875112 : + self.pending_deletions.len()
1940 875112 : }
1941 :
1942 : /// Read a page from the Timeline we are writing to. For metadata pages, this passes through
1943 : /// a cache in Self, which makes writes earlier in this modification visible to WAL records later
1944 : /// in the modification.
1945 : ///
1946 : /// For data pages, reads pass directly to the owning Timeline: any ingest code which reads a data
1947 : /// page must ensure that the pages they read are already committed in Timeline, for example
1948 : /// DB create operations are always preceded by a call to commit(). This is special cased because
1949 : /// it's rare: all the 'normal' WAL operations will only read metadata pages such as relation sizes,
1950 : /// and not data pages.
1951 859806 : async fn get(&self, key: Key, ctx: &RequestContext) -> Result<Bytes, PageReconstructError> {
1952 859806 : if !Self::is_data_key(&key) {
1953 : // Have we already updated the same key? Read the latest pending updated
1954 : // version in that case.
1955 : //
1956 : // Note: we don't check pending_deletions. It is an error to request a
1957 : // value that has been removed, deletion only avoids leaking storage.
1958 859806 : if let Some(values) = self.pending_metadata_pages.get(&key.to_compact()) {
1959 47784 : if let Some((_, _, value)) = values.last() {
1960 47784 : return if let Value::Image(img) = value {
1961 47784 : Ok(img.clone())
1962 : } else {
1963 : // Currently, we never need to read back a WAL record that we
1964 : // inserted in the same "transaction". All the metadata updates
1965 : // work directly with Images, and we never need to read actual
1966 : // data pages. We could handle this if we had to, by calling
1967 : // the walredo manager, but let's keep it simple for now.
1968 0 : Err(PageReconstructError::Other(anyhow::anyhow!(
1969 0 : "unexpected pending WAL record"
1970 0 : )))
1971 : };
1972 0 : }
1973 812022 : }
1974 : } else {
1975 : // This is an expensive check, so we only do it in debug mode. If reading a data key,
1976 : // this key should never be present in pending_data_pages. We ensure this by committing
1977 : // modifications before ingesting DB create operations, which are the only kind that reads
1978 : // data pages during ingest.
1979 0 : if cfg!(debug_assertions) {
1980 0 : for (dirty_key, _, _, _) in &self.pending_data_pages {
1981 0 : debug_assert!(&key.to_compact() != dirty_key);
1982 : }
1983 :
1984 0 : debug_assert!(!self.pending_zero_data_pages.contains(&key.to_compact()))
1985 0 : }
1986 : }
1987 :
1988 : // Metadata page cache miss, or we're reading a data page.
1989 812022 : let lsn = Lsn::max(self.tline.get_last_record_lsn(), self.lsn);
1990 812022 : self.tline.get(key, lsn, ctx).await
1991 859806 : }
1992 :
1993 : /// Only used during unit tests, force putting a key into the modification.
1994 : #[cfg(test)]
1995 6 : pub(crate) fn put_for_test(&mut self, key: Key, val: Value) {
1996 6 : self.put(key, val);
1997 6 : }
1998 :
1999 2128446 : fn put(&mut self, key: Key, val: Value) {
2000 2128446 : if Self::is_data_key(&key) {
2001 1270530 : self.put_data(key.to_compact(), val)
2002 : } else {
2003 857916 : self.put_metadata(key.to_compact(), val)
2004 : }
2005 2128446 : }
2006 :
2007 1279524 : fn put_data(&mut self, key: CompactKey, val: Value) {
2008 1279524 : let val_serialized_size = val.serialized_size().unwrap() as usize;
2009 1279524 :
2010 1279524 : // If this page was previously zero'd in the same WalRecord, then drop the previous zero page write. This
2011 1279524 : // is an optimization that avoids persisting both the zero page generated by us (e.g. during a relation extend),
2012 1279524 : // and the subsequent postgres-originating write
2013 1279524 : if self.pending_zero_data_pages.remove(&key) {
2014 0 : self.pending_bytes -= ZERO_PAGE.len();
2015 1279524 : }
2016 :
2017 1279524 : self.pending_bytes += val_serialized_size;
2018 1279524 : self.pending_data_pages
2019 1279524 : .push((key, self.lsn, val_serialized_size, val))
2020 1279524 : }
2021 :
2022 857916 : fn put_metadata(&mut self, key: CompactKey, val: Value) {
2023 857916 : let values = self.pending_metadata_pages.entry(key).or_default();
2024 : // Replace the previous value if it exists at the same lsn
2025 857916 : if let Some((last_lsn, last_value_ser_size, last_value)) = values.last_mut() {
2026 36456 : if *last_lsn == self.lsn {
2027 : // Update the pending_bytes contribution from this entry, and update the serialized size in place
2028 36456 : self.pending_bytes -= *last_value_ser_size;
2029 36456 : *last_value_ser_size = val.serialized_size().unwrap() as usize;
2030 36456 : self.pending_bytes += *last_value_ser_size;
2031 36456 :
2032 36456 : // Use the latest value; this replaces any earlier write to the same (key, lsn), such as may
2033 36456 : // have been generated by synthesized zero-page writes prior to the first real write to a page.
2034 36456 : *last_value = val;
2035 36456 : return;
2036 0 : }
2037 821460 : }
2038 :
2039 821460 : let val_serialized_size = val.serialized_size().unwrap() as usize;
2040 821460 : self.pending_bytes += val_serialized_size;
2041 821460 : values.push((self.lsn, val_serialized_size, val));
2042 857916 : }
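// A minimal sketch of the replace-at-same-LSN rule above: if the newest
// pending entry for this key already carries the current LSN, overwrite it in
// place instead of appending, so a synthesized zero-page write is superseded
// by the first real write at that LSN. Simplified types, illustrative only:

fn put_value_at_lsn(values: &mut Vec<(u64, Vec<u8>)>, lsn: u64, val: Vec<u8>) {
    if let Some((last_lsn, last_val)) = values.last_mut() {
        if *last_lsn == lsn {
            *last_val = val; // keep only the latest value for (key, lsn)
            return;
        }
    }
    values.push((lsn, val));
}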
2043 :
2044 6 : fn delete(&mut self, key_range: Range<Key>) {
2045 6 : trace!("DELETE {}-{}", key_range.start, key_range.end);
2046 6 : self.pending_deletions.push((key_range, self.lsn));
2047 6 : }
2048 : }
2049 :
2050 : /// This struct facilitates accessing either a committed key from the timeline at a
2051 : /// specific LSN, or the latest uncommitted key from a pending modification.
2052 : ///
2053 : /// During WAL ingestion, the records from multiple LSNs may be batched in the same
2054 : /// modification before being flushed to the timeline. Hence, the routines in WalIngest
2055 : /// need to look up the keys in the modification first before looking them up in the
2056 : /// timeline to not miss the latest updates.
2057 : #[derive(Clone, Copy)]
2058 : pub enum Version<'a> {
2059 : Lsn(Lsn),
2060 : Modified(&'a DatadirModification<'a>),
2061 : }
2062 :
2063 : impl<'a> Version<'a> {
2064 70680 : async fn get(
2065 70680 : &self,
2066 70680 : timeline: &Timeline,
2067 70680 : key: Key,
2068 70680 : ctx: &RequestContext,
2069 70680 : ) -> Result<Bytes, PageReconstructError> {
2070 70680 : match self {
2071 70620 : Version::Lsn(lsn) => timeline.get(key, *lsn, ctx).await,
2072 60 : Version::Modified(modification) => modification.get(key, ctx).await,
2073 : }
2074 70680 : }
2075 :
2076 106860 : fn get_lsn(&self) -> Lsn {
2077 106860 : match self {
2078 88722 : Version::Lsn(lsn) => *lsn,
2079 18138 : Version::Modified(modification) => modification.lsn,
2080 : }
2081 106860 : }
2082 : }
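// A sketch of the lookup pattern `Version` enables during ingest: consult the
// in-flight modification's pending values first, then fall back to the
// committed state. All names below are illustrative stand-ins, not the real
// pageserver types:

fn versioned_get<'a>(
    pending: Option<&'a HashMap<u64, Vec<u8>>>, // like Version::Modified
    committed: &'a HashMap<u64, Vec<u8>>,       // like Version::Lsn
    key: u64,
) -> Option<&'a Vec<u8>> {
    pending.and_then(|m| m.get(&key)).or_else(|| committed.get(&key))
}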
2083 :
2084 : //--- Metadata structs stored in key-value pairs in the repository.
2085 :
2086 6726 : #[derive(Debug, Serialize, Deserialize)]
2087 : struct DbDirectory {
2088 : // (spcnode, dbnode) -> (do relmapper and PG_VERSION files exist)
2089 : dbdirs: HashMap<(Oid, Oid), bool>,
2090 : }
2091 :
2092 870 : #[derive(Debug, Serialize, Deserialize)]
2093 : struct TwoPhaseDirectory {
2094 : xids: HashSet<TransactionId>,
2095 : }
2096 :
2097 5796 : #[derive(Debug, Serialize, Deserialize, Default)]
2098 : struct RelDirectory {
2099 : // Set of relations that exist. (relfilenode, forknum)
2100 : //
2101 : // TODO: Store it as a btree or radix tree or something else that spans multiple
2102 : // key-value pairs, if you have a lot of relations
2103 : rels: HashSet<(Oid, u8)>,
2104 : }
2105 :
2106 84 : #[derive(Debug, Serialize, Deserialize, Default, PartialEq)]
2107 : pub(crate) struct AuxFilesDirectory {
2108 : pub(crate) files: HashMap<String, Bytes>,
2109 : }
2110 :
2111 : impl AuxFilesDirectory {
2112 48 : pub(crate) fn upsert(&mut self, key: String, value: Option<Bytes>) {
2113 48 : if let Some(value) = value {
2114 42 : self.files.insert(key, value);
2115 42 : } else {
2116 6 : self.files.remove(&key);
2117 6 : }
2118 48 : }
2119 : }
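// Usage sketch for `upsert`, as it might appear in a unit test in this module:
// `Some(bytes)` inserts or overwrites an entry, `None` removes it. The file
// path below is illustrative only.

#[cfg(test)]
#[test]
fn aux_files_directory_upsert_sketch() {
    let mut dir = AuxFilesDirectory { files: HashMap::new() };
    dir.upsert("pg_stat/pgstat.stat".to_string(), Some(Bytes::from_static(b"x")));
    assert_eq!(dir.files.len(), 1);
    dir.upsert("pg_stat/pgstat.stat".to_string(), None);
    assert!(dir.files.is_empty());
}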
2120 :
2121 0 : #[derive(Debug, Serialize, Deserialize)]
2122 : struct RelSizeEntry {
2123 : nblocks: u32,
2124 : }
2125 :
2126 2610 : #[derive(Debug, Serialize, Deserialize, Default)]
2127 : struct SlruSegmentDirectory {
2128 : // Set of SLRU segments that exist.
2129 : segments: HashSet<u32>,
2130 : }
2131 :
2132 : #[derive(Copy, Clone, PartialEq, Eq, Debug, enum_map::Enum)]
2133 : #[repr(u8)]
2134 : pub(crate) enum DirectoryKind {
2135 : Db,
2136 : TwoPhase,
2137 : Rel,
2138 : AuxFiles,
2139 : SlruSegment(SlruKind),
2140 : }
2141 :
2142 : impl DirectoryKind {
2143 : pub(crate) const KINDS_NUM: usize = <DirectoryKind as Enum>::LENGTH;
2144 16944 : pub(crate) fn offset(&self) -> usize {
2145 16944 : self.into_usize()
2146 16944 : }
2147 : }
2148 :
2149 : static ZERO_PAGE: Bytes = Bytes::from_static(&[0u8; BLCKSZ as usize]);
2150 :
2151 : #[allow(clippy::bool_assert_comparison)]
2152 : #[cfg(test)]
2153 : mod tests {
2154 : use hex_literal::hex;
2155 : use utils::id::TimelineId;
2156 :
2157 : use super::*;
2158 :
2159 : use crate::{tenant::harness::TenantHarness, DEFAULT_PG_VERSION};
2160 :
2161 : /// Test a round trip of aux file updates, from DatadirModification to reading back from the Timeline
2162 : #[tokio::test]
2163 6 : async fn aux_files_round_trip() -> anyhow::Result<()> {
2164 6 : let name = "aux_files_round_trip";
2165 6 : let harness = TenantHarness::create(name).await?;
2166 6 :
2167 6 : pub const TIMELINE_ID: TimelineId =
2168 6 : TimelineId::from_array(hex!("11223344556677881122334455667788"));
2169 6 :
2170 24 : let (tenant, ctx) = harness.load().await;
2171 6 : let tline = tenant
2172 6 : .create_empty_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
2173 6 : .await?;
2174 6 : let tline = tline.raw_timeline().unwrap();
2175 6 :
2176 6 : // First modification: insert two keys
2177 6 : let mut modification = tline.begin_modification(Lsn(0x1000));
2178 6 : modification.put_file("foo/bar1", b"content1", &ctx).await?;
2179 6 : modification.set_lsn(Lsn(0x1008))?;
2180 6 : modification.put_file("foo/bar2", b"content2", &ctx).await?;
2181 6 : modification.commit(&ctx).await?;
2182 6 : let expect_1008 = HashMap::from([
2183 6 : ("foo/bar1".to_string(), Bytes::from_static(b"content1")),
2184 6 : ("foo/bar2".to_string(), Bytes::from_static(b"content2")),
2185 6 : ]);
2186 6 :
2187 6 : let readback = tline.list_aux_files(Lsn(0x1008), &ctx).await?;
2188 6 : assert_eq!(readback, expect_1008);
2189 6 :
2190 6 : // Second modification: update one key, remove the other
2191 6 : let mut modification = tline.begin_modification(Lsn(0x2000));
2192 6 : modification.put_file("foo/bar1", b"content3", &ctx).await?;
2193 6 : modification.set_lsn(Lsn(0x2008))?;
2194 6 : modification.put_file("foo/bar2", b"", &ctx).await?;
2195 6 : modification.commit(&ctx).await?;
2196 6 : let expect_2008 =
2197 6 : HashMap::from([("foo/bar1".to_string(), Bytes::from_static(b"content3"))]);
2198 6 :
2199 6 : let readback = tline.list_aux_files(Lsn(0x2008), &ctx).await?;
2200 6 : assert_eq!(readback, expect_2008);
2201 6 :
2202 6 : // Reading back in time works
2203 6 : let readback = tline.list_aux_files(Lsn(0x1008), &ctx).await?;
2204 6 : assert_eq!(readback, expect_1008);
2205 6 :
2206 6 : Ok(())
2207 6 : }
2208 :
2209 : /*
2210 : fn assert_current_logical_size<R: Repository>(timeline: &DatadirTimeline<R>, lsn: Lsn) {
2211 : let incremental = timeline.get_current_logical_size();
2212 : let non_incremental = timeline
2213 : .get_current_logical_size_non_incremental(lsn)
2214 : .unwrap();
2215 : assert_eq!(incremental, non_incremental);
2216 : }
2217 : */
2218 :
2219 : /*
2220 : ///
2221 : /// Test list_rels() function, with branches and dropped relations
2222 : ///
2223 : #[test]
2224 : fn test_list_rels_drop() -> Result<()> {
2225 : let repo = RepoHarness::create("test_list_rels_drop")?.load();
2226 : let tline = create_empty_timeline(repo, TIMELINE_ID)?;
2227 : const TESTDB: u32 = 111;
2228 :
2229 : // Import initial dummy checkpoint record, otherwise the get_timeline() call
2230 : // after branching fails below
2231 : let mut writer = tline.begin_record(Lsn(0x10));
2232 : writer.put_checkpoint(ZERO_CHECKPOINT.clone())?;
2233 : writer.finish()?;
2234 :
2235 : // Create a relation on the timeline
2236 : let mut writer = tline.begin_record(Lsn(0x20));
2237 : writer.put_rel_page_image(TESTREL_A, 0, TEST_IMG("foo blk 0 at 2"))?;
2238 : writer.finish()?;
2239 :
2240 : let writer = tline.begin_record(Lsn(0x00));
2241 : writer.finish()?;
2242 :
2243 : // Check that list_rels() lists it after LSN 2, but no before it
2244 : assert!(!tline.list_rels(0, TESTDB, Lsn(0x10))?.contains(&TESTREL_A));
2245 : assert!(tline.list_rels(0, TESTDB, Lsn(0x20))?.contains(&TESTREL_A));
2246 : assert!(tline.list_rels(0, TESTDB, Lsn(0x30))?.contains(&TESTREL_A));
2247 :
2248 : // Create a branch, check that the relation is visible there
2249 : repo.branch_timeline(&tline, NEW_TIMELINE_ID, Lsn(0x30))?;
2250 : let newtline = match repo.get_timeline(NEW_TIMELINE_ID)?.local_timeline() {
2251 : Some(timeline) => timeline,
2252 : None => panic!("Should have a local timeline"),
2253 : };
2254 : let newtline = DatadirTimelineImpl::new(newtline);
2255 : assert!(newtline
2256 : .list_rels(0, TESTDB, Lsn(0x30))?
2257 : .contains(&TESTREL_A));
2258 :
2259 : // Drop it on the branch
2260 : let mut new_writer = newtline.begin_record(Lsn(0x40));
2261 : new_writer.drop_relation(TESTREL_A)?;
2262 : new_writer.finish()?;
2263 :
2264 : // Check that it's no longer listed on the branch after the point where it was dropped
2265 : assert!(newtline
2266 : .list_rels(0, TESTDB, Lsn(0x30))?
2267 : .contains(&TESTREL_A));
2268 : assert!(!newtline
2269 : .list_rels(0, TESTDB, Lsn(0x40))?
2270 : .contains(&TESTREL_A));
2271 :
2272 : // Run checkpoint and garbage collection and check that it's still not visible
2273 : newtline.checkpoint(CheckpointConfig::Forced)?;
2274 : repo.gc_iteration(Some(NEW_TIMELINE_ID), 0, true)?;
2275 :
2276 : assert!(!newtline
2277 : .list_rels(0, TESTDB, Lsn(0x40))?
2278 : .contains(&TESTREL_A));
2279 :
2280 : Ok(())
2281 : }
2282 : */
2283 :
2284 : /*
2285 : #[test]
2286 : fn test_read_beyond_eof() -> Result<()> {
2287 : let repo = RepoHarness::create("test_read_beyond_eof")?.load();
2288 : let tline = create_test_timeline(repo, TIMELINE_ID)?;
2289 :
2290 : make_some_layers(&tline, Lsn(0x20))?;
2291 : let mut writer = tline.begin_record(Lsn(0x60));
2292 : walingest.put_rel_page_image(
2293 : &mut writer,
2294 : TESTREL_A,
2295 : 0,
2296 : TEST_IMG(&format!("foo blk 0 at {}", Lsn(0x60))),
2297 : )?;
2298 : writer.finish()?;
2299 :
2300 : // Test read before rel creation. Should error out.
2301 : assert!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x10), false).is_err());
2302 :
2303 : // Read block beyond end of relation at different points in time.
2304 : // These reads should fall into different delta, image, and in-memory layers.
2305 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x20), false)?, ZERO_PAGE);
2306 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x25), false)?, ZERO_PAGE);
2307 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x30), false)?, ZERO_PAGE);
2308 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x35), false)?, ZERO_PAGE);
2309 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x40), false)?, ZERO_PAGE);
2310 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x45), false)?, ZERO_PAGE);
2311 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x50), false)?, ZERO_PAGE);
2312 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x55), false)?, ZERO_PAGE);
2313 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x60), false)?, ZERO_PAGE);
2314 :
2315 : // Test on an in-memory layer with no preceding layer
2316 : let mut writer = tline.begin_record(Lsn(0x70));
2317 : walingest.put_rel_page_image(
2318 : &mut writer,
2319 : TESTREL_B,
2320 : 0,
2321 : TEST_IMG(&format!("foo blk 0 at {}", Lsn(0x70))),
2322 : )?;
2323 : writer.finish()?;
2324 :
2325 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_B, 1, Lsn(0x70), false)?, ZERO_PAGE);
2326 :
2327 : Ok(())
2328 : }
2329 : */
2330 : }