Line data Source code
1 : //!
2 : //! This provides an abstraction to store PostgreSQL relations and other files
3 : //! in the key-value store that implements the Repository interface.
4 : //!
5 : //! (TODO: The line between PUT-functions here and walingest.rs is a bit blurry, as
6 : //! walingest.rs handles a few things like implicit relation creation and extension.
7 : //! Clarify that)
8 : //!
9 : use std::collections::{BTreeMap, HashMap, HashSet, hash_map};
10 : use std::ops::{ControlFlow, Range};
11 :
12 : use anyhow::{Context, ensure};
13 : use bytes::{Buf, Bytes, BytesMut};
14 : use enum_map::Enum;
15 : use itertools::Itertools;
16 : use pageserver_api::key::{
17 : AUX_FILES_KEY, CHECKPOINT_KEY, CONTROLFILE_KEY, CompactKey, DBDIR_KEY, Key, RelDirExists,
18 : TWOPHASEDIR_KEY, dbdir_key_range, rel_block_to_key, rel_dir_to_key, rel_key_range,
19 : rel_size_to_key, rel_tag_sparse_key, rel_tag_sparse_key_range, relmap_file_key,
20 : repl_origin_key, repl_origin_key_range, slru_block_to_key, slru_dir_to_key,
21 : slru_segment_key_range, slru_segment_size_to_key, twophase_file_key, twophase_key_range,
22 : };
23 : use pageserver_api::keyspace::SparseKeySpace;
24 : use pageserver_api::models::RelSizeMigration;
25 : use pageserver_api::record::NeonWalRecord;
26 : use pageserver_api::reltag::{BlockNumber, RelTag, SlruKind};
27 : use pageserver_api::shard::ShardIdentity;
28 : use pageserver_api::value::Value;
29 : use postgres_ffi::relfile_utils::{FSM_FORKNUM, VISIBILITYMAP_FORKNUM};
30 : use postgres_ffi::{BLCKSZ, Oid, RepOriginId, TimestampTz, TransactionId};
31 : use serde::{Deserialize, Serialize};
32 : use strum::IntoEnumIterator;
33 : use tokio_util::sync::CancellationToken;
34 : use tracing::{debug, info, trace, warn};
35 : use utils::bin_ser::{BeSer, DeserializeError};
36 : use utils::lsn::Lsn;
37 : use utils::pausable_failpoint;
38 : use wal_decoder::serialized_batch::{SerializedValueBatch, ValueMeta};
39 :
40 : use super::tenant::{PageReconstructError, Timeline};
41 : use crate::aux_file;
42 : use crate::context::RequestContext;
43 : use crate::keyspace::{KeySpace, KeySpaceAccum};
44 : use crate::metrics::{
45 : RELSIZE_CACHE_ENTRIES, RELSIZE_CACHE_HITS, RELSIZE_CACHE_MISSES, RELSIZE_CACHE_MISSES_OLD,
46 : };
47 : use crate::span::{
48 : debug_assert_current_span_has_tenant_and_timeline_id,
49 : debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id,
50 : };
51 : use crate::tenant::storage_layer::IoConcurrency;
52 : use crate::tenant::timeline::GetVectoredError;
53 :
54 : /// Max delta records appended to the AUX_FILES_KEY (for aux v1). The write path will write a full image once this threshold is reached.
55 : pub const MAX_AUX_FILE_DELTAS: usize = 1024;
56 :
57 : /// Max number of aux-file-related delta layers. The compaction will create a new image layer once this threshold is reached.
58 : pub const MAX_AUX_FILE_V2_DELTAS: usize = 16;
59 :
60 : #[derive(Debug)]
61 : pub enum LsnForTimestamp {
62 : /// Found commits both before and after the given timestamp
63 : Present(Lsn),
64 :
65 :     /// Found no commits after the given timestamp; this means
66 : /// that the newest data in the branch is older than the given
67 : /// timestamp.
68 : ///
69 : /// All commits <= LSN happened before the given timestamp
70 : Future(Lsn),
71 :
72 :     /// The queried timestamp is past the horizon we look back at (the PITR window)
73 : ///
74 : /// All commits > LSN happened after the given timestamp,
75 : /// but any commits < LSN might have happened before or after
76 : /// the given timestamp. We don't know because no data before
77 : /// the given lsn is available.
78 : Past(Lsn),
79 :
80 : /// We have found no commit with a timestamp,
81 : /// so we can't return anything meaningful.
82 : ///
83 : /// The associated LSN is the lower bound value we can safely
84 :     /// create branches on, but no statement is made about whether it is
85 :     /// older or newer than the timestamp.
86 : ///
87 : /// This variant can e.g. be returned right after a
88 : /// cluster import.
89 : NoData(Lsn),
90 : }
91 :
92 : #[derive(Debug, thiserror::Error)]
93 : pub(crate) enum CalculateLogicalSizeError {
94 : #[error("cancelled")]
95 : Cancelled,
96 :
97 :     /// Something went wrong while reading the metadata we use to calculate logical size.
98 : /// Note that cancellation variants of `PageReconstructError` are transformed to [`Self::Cancelled`]
99 : /// in the `From` implementation for this variant.
100 : #[error(transparent)]
101 : PageRead(PageReconstructError),
102 :
103 : /// Something went wrong deserializing metadata that we read to calculate logical size
104 : #[error("decode error: {0}")]
105 : Decode(#[from] DeserializeError),
106 : }
107 :
108 : #[derive(Debug, thiserror::Error)]
109 : pub(crate) enum CollectKeySpaceError {
110 : #[error(transparent)]
111 : Decode(#[from] DeserializeError),
112 : #[error(transparent)]
113 : PageRead(PageReconstructError),
114 : #[error("cancelled")]
115 : Cancelled,
116 : }
117 :
118 : impl From<PageReconstructError> for CollectKeySpaceError {
119 0 : fn from(err: PageReconstructError) -> Self {
120 0 : match err {
121 0 : PageReconstructError::Cancelled => Self::Cancelled,
122 0 : err => Self::PageRead(err),
123 : }
124 0 : }
125 : }
126 :
127 : impl From<PageReconstructError> for CalculateLogicalSizeError {
128 0 : fn from(pre: PageReconstructError) -> Self {
129 0 : match pre {
130 0 : PageReconstructError::Cancelled => Self::Cancelled,
131 0 : _ => Self::PageRead(pre),
132 : }
133 0 : }
134 : }
135 :
136 : #[derive(Debug, thiserror::Error)]
137 : pub enum RelationError {
138 : #[error("Relation Already Exists")]
139 : AlreadyExists,
140 : #[error("invalid relnode")]
141 : InvalidRelnode,
142 : #[error(transparent)]
143 : Other(#[from] anyhow::Error),
144 : }
145 :
146 : ///
147 : /// This impl provides all the functionality to store PostgreSQL relations, SLRUs,
148 : /// and other special kinds of files, in a versioned key-value store. The
149 : /// Timeline struct provides the key-value store.
150 : ///
151 : /// This is a separate impl block, so that we can easily include all these functions in the
152 : /// Timeline implementation; it might be moved into a separate struct later.
153 : impl Timeline {
154 : /// Start ingesting a WAL record, or other atomic modification of
155 : /// the timeline.
156 : ///
157 : /// This provides a transaction-like interface to perform a bunch
158 : /// of modifications atomically.
159 : ///
160 : /// To ingest a WAL record, call begin_modification(lsn) to get a
161 : /// DatadirModification object. Use the functions in the object to
162 : /// modify the repository state, updating all the pages and metadata
163 : /// that the WAL record affects. When you're done, call commit() to
164 : /// commit the changes.
165 : ///
166 : /// Lsn stored in modification is advanced by `ingest_record` and
167 : /// is used by `commit()` to update `last_record_lsn`.
168 : ///
169 : /// Calling commit() will flush all the changes and reset the state,
170 : /// so the `DatadirModification` struct can be reused to perform the next modification.
171 : ///
172 : /// Note that any pending modifications you make through the
173 : /// modification object won't be visible to calls to the 'get' and list
174 : /// functions of the timeline until you finish! And if you update the
175 : /// same page twice, the last update wins.
176 : ///
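     :     /// A minimal usage sketch (hypothetical caller; `lsn`, `rel`, `blknum`,
     :     /// `img` and `ctx` are assumptions, and `put_rel_page_image` stands in
     :     /// for any of the put-functions):
     :     ///
     :     /// ```ignore
     :     /// let mut modification = timeline.begin_modification(lsn);
     :     /// // Stage a page write; it is buffered, not yet visible to readers.
     :     /// modification.put_rel_page_image(rel, blknum, img)?;
     :     /// // Atomically flush everything staged above and advance last_record_lsn.
     :     /// modification.commit(&ctx).await?;
     :     /// ```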
177 536828 : pub fn begin_modification(&self, lsn: Lsn) -> DatadirModification
178 536828 : where
179 536828 : Self: Sized,
180 536828 : {
181 536828 : DatadirModification {
182 536828 : tline: self,
183 536828 : pending_lsns: Vec::new(),
184 536828 : pending_metadata_pages: HashMap::new(),
185 536828 : pending_data_batch: None,
186 536828 : pending_deletions: Vec::new(),
187 536828 : pending_nblocks: 0,
188 536828 : pending_directory_entries: Vec::new(),
189 536828 : pending_metadata_bytes: 0,
190 536828 : lsn,
191 536828 : }
192 536828 : }
193 :
194 : //------------------------------------------------------------------------------
195 : // Public GET functions
196 : //------------------------------------------------------------------------------
197 :
198 : /// Look up given page version.
199 36768 : pub(crate) async fn get_rel_page_at_lsn(
200 36768 : &self,
201 36768 : tag: RelTag,
202 36768 : blknum: BlockNumber,
203 36768 : version: Version<'_>,
204 36768 : ctx: &RequestContext,
205 36768 : io_concurrency: IoConcurrency,
206 36768 : ) -> Result<Bytes, PageReconstructError> {
207 36768 : match version {
208 36768 : Version::Lsn(effective_lsn) => {
209 36768 : let pages: smallvec::SmallVec<[_; 1]> = smallvec::smallvec![(tag, blknum)];
210 36768 : let res = self
211 36768 : .get_rel_page_at_lsn_batched(
212 36768 : pages.iter().map(|(tag, blknum)| (tag, blknum)),
213 36768 : effective_lsn,
214 36768 : io_concurrency.clone(),
215 36768 : ctx,
216 36768 : )
217 36768 : .await;
218 36768 : assert_eq!(res.len(), 1);
219 36768 : res.into_iter().next().unwrap()
220 : }
221 0 : Version::Modified(modification) => {
222 0 : if tag.relnode == 0 {
223 0 : return Err(PageReconstructError::Other(
224 0 : RelationError::InvalidRelnode.into(),
225 0 : ));
226 0 : }
227 :
228 0 : let nblocks = self.get_rel_size(tag, version, ctx).await?;
229 0 : if blknum >= nblocks {
230 0 : debug!(
231 0 : "read beyond EOF at {} blk {} at {}, size is {}: returning all-zeros page",
232 0 : tag,
233 0 : blknum,
234 0 : version.get_lsn(),
235 : nblocks
236 : );
237 0 : return Ok(ZERO_PAGE.clone());
238 0 : }
239 0 :
240 0 : let key = rel_block_to_key(tag, blknum);
241 0 : modification.get(key, ctx).await
242 : }
243 : }
244 36768 : }
245 :
246 : /// Like [`Self::get_rel_page_at_lsn`], but returns a batch of pages.
247 : ///
248 : /// The ordering of the returned vec corresponds to the ordering of `pages`.
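     :     ///
     :     /// For example (hypothetical input): requesting `[(rel_a, 0), (rel_b, 7)]`
     :     /// yields a two-element vec whose first entry is the result for `(rel_a, 0)`
     :     /// and whose second is the result for `(rel_b, 7)`. A key requested more than
     :     /// once is read once and the result is cloned into each corresponding slot.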
249 36768 : pub(crate) async fn get_rel_page_at_lsn_batched(
250 36768 : &self,
251 36768 : pages: impl ExactSizeIterator<Item = (&RelTag, &BlockNumber)>,
252 36768 : effective_lsn: Lsn,
253 36768 : io_concurrency: IoConcurrency,
254 36768 : ctx: &RequestContext,
255 36768 : ) -> Vec<Result<Bytes, PageReconstructError>> {
256 36768 : debug_assert_current_span_has_tenant_and_timeline_id();
257 36768 :
258 36768 : let mut slots_filled = 0;
259 36768 : let page_count = pages.len();
260 36768 :
261 36768 : // Would be nice to use smallvec here but it doesn't provide the spare_capacity_mut() API.
262 36768 : let mut result = Vec::with_capacity(pages.len());
263 36768 : let result_slots = result.spare_capacity_mut();
264 36768 :
265 36768 : let mut keys_slots: BTreeMap<Key, smallvec::SmallVec<[usize; 1]>> = BTreeMap::default();
266 36768 : for (response_slot_idx, (tag, blknum)) in pages.enumerate() {
267 36768 : if tag.relnode == 0 {
268 0 : result_slots[response_slot_idx].write(Err(PageReconstructError::Other(
269 0 : RelationError::InvalidRelnode.into(),
270 0 : )));
271 0 :
272 0 : slots_filled += 1;
273 0 : continue;
274 36768 : }
275 :
276 36768 : let nblocks = match self
277 36768 : .get_rel_size(*tag, Version::Lsn(effective_lsn), ctx)
278 36768 : .await
279 : {
280 36768 : Ok(nblocks) => nblocks,
281 0 : Err(err) => {
282 0 : result_slots[response_slot_idx].write(Err(err));
283 0 : slots_filled += 1;
284 0 : continue;
285 : }
286 : };
287 :
288 36768 : if *blknum >= nblocks {
289 0 : debug!(
290 0 : "read beyond EOF at {} blk {} at {}, size is {}: returning all-zeros page",
291 : tag, blknum, effective_lsn, nblocks
292 : );
293 0 : result_slots[response_slot_idx].write(Ok(ZERO_PAGE.clone()));
294 0 : slots_filled += 1;
295 0 : continue;
296 36768 : }
297 36768 :
298 36768 : let key = rel_block_to_key(*tag, *blknum);
299 36768 :
300 36768 : let key_slots = keys_slots.entry(key).or_default();
301 36768 : key_slots.push(response_slot_idx);
302 : }
303 :
304 36768 : let keyspace = {
305 : // add_key requires monotonicity
306 36768 : let mut acc = KeySpaceAccum::new();
307 36768 : for key in keys_slots
308 36768 : .keys()
309 36768 : // in fact it requires strong monotonicity
310 36768 : .dedup()
311 36768 : {
312 36768 : acc.add_key(*key);
313 36768 : }
314 36768 : acc.to_keyspace()
315 36768 : };
316 36768 :
317 36768 : match self
318 36768 : .get_vectored(keyspace, effective_lsn, io_concurrency, ctx)
319 36768 : .await
320 : {
321 36768 : Ok(results) => {
322 73536 : for (key, res) in results {
323 36768 : let mut key_slots = keys_slots.remove(&key).unwrap().into_iter();
324 36768 : let first_slot = key_slots.next().unwrap();
325 :
326 36768 : for slot in key_slots {
327 0 : let clone = match &res {
328 0 : Ok(buf) => Ok(buf.clone()),
329 0 : Err(err) => Err(match err {
330 0 : PageReconstructError::Cancelled => PageReconstructError::Cancelled,
331 :
332 0 : x @ PageReconstructError::Other(_)
333 0 : | x @ PageReconstructError::AncestorLsnTimeout(_)
334 0 : | x @ PageReconstructError::WalRedo(_)
335 0 : | x @ PageReconstructError::MissingKey(_) => {
336 0 : PageReconstructError::Other(anyhow::anyhow!(
337 0 : "there was more than one request for this key in the batch, error logged once: {x:?}"
338 0 : ))
339 : }
340 : }),
341 : };
342 :
343 0 : result_slots[slot].write(clone);
344 0 : slots_filled += 1;
345 : }
346 :
347 36768 : result_slots[first_slot].write(res);
348 36768 : slots_filled += 1;
349 : }
350 : }
351 0 : Err(err) => {
352 : // this cannot really happen because get_vectored only errors globally on invalid LSN or too large batch size
353 : // (We enforce the max batch size outside of this function, in the code that constructs the batch request.)
354 0 : for slot in keys_slots.values().flatten() {
355 : // this whole `match` is a lot like `From<GetVectoredError> for PageReconstructError`
356 : // but without taking ownership of the GetVectoredError
357 0 : let err = match &err {
358 0 : GetVectoredError::Cancelled => Err(PageReconstructError::Cancelled),
359 : // TODO: restructure get_vectored API to make this error per-key
360 0 : GetVectoredError::MissingKey(err) => {
361 0 : Err(PageReconstructError::Other(anyhow::anyhow!(
362 0 : "whole vectored get request failed because one or more of the requested keys were missing: {err:?}"
363 0 : )))
364 : }
365 : // TODO: restructure get_vectored API to make this error per-key
366 0 : GetVectoredError::GetReadyAncestorError(err) => {
367 0 : Err(PageReconstructError::Other(anyhow::anyhow!(
368 0 : "whole vectored get request failed because one or more key required ancestor that wasn't ready: {err:?}"
369 0 : )))
370 : }
371 : // TODO: restructure get_vectored API to make this error per-key
372 0 : GetVectoredError::Other(err) => Err(PageReconstructError::Other(
373 0 : anyhow::anyhow!("whole vectored get request failed: {err:?}"),
374 0 : )),
375 : // TODO: we can prevent this error class by moving this check into the type system
376 0 : GetVectoredError::InvalidLsn(e) => {
377 0 : Err(anyhow::anyhow!("invalid LSN: {e:?}").into())
378 : }
379 : // NB: this should never happen in practice because we limit MAX_GET_VECTORED_KEYS
380 : // TODO: we can prevent this error class by moving this check into the type system
381 0 : GetVectoredError::Oversized(err) => {
382 0 : Err(anyhow::anyhow!("batching oversized: {err:?}").into())
383 : }
384 : };
385 :
386 0 : result_slots[*slot].write(err);
387 : }
388 :
389 0 : slots_filled += keys_slots.values().map(|slots| slots.len()).sum::<usize>();
390 0 : }
391 : };
392 :
393 36768 : assert_eq!(slots_filled, page_count);
394 : // SAFETY:
395 :         // 1. `result` and any of its uninit members are not read from until this point
396 : // 2. The length below is tracked at run-time and matches the number of requested pages.
397 36768 : unsafe {
398 36768 : result.set_len(page_count);
399 36768 : }
400 36768 :
401 36768 : result
402 36768 : }
403 :
404 : /// Get size of a database in blocks. This is only accurate on shard 0. It will undercount on
405 : /// other shards, by only accounting for relations the shard has pages for, and only accounting
406 : /// for pages up to the highest page number it has stored.
407 0 : pub(crate) async fn get_db_size(
408 0 : &self,
409 0 : spcnode: Oid,
410 0 : dbnode: Oid,
411 0 : version: Version<'_>,
412 0 : ctx: &RequestContext,
413 0 : ) -> Result<usize, PageReconstructError> {
414 0 : let mut total_blocks = 0;
415 :
416 0 : let rels = self.list_rels(spcnode, dbnode, version, ctx).await?;
417 :
418 0 : for rel in rels {
419 0 : let n_blocks = self.get_rel_size(rel, version, ctx).await?;
420 0 : total_blocks += n_blocks as usize;
421 : }
422 0 : Ok(total_blocks)
423 0 : }
424 :
425 :     /// Get size of a relation file. The relation must exist; otherwise an error is returned.
426 : ///
427 : /// This is only accurate on shard 0. On other shards, it will return the size up to the highest
428 : /// page number stored in the shard.
429 48868 : pub(crate) async fn get_rel_size(
430 48868 : &self,
431 48868 : tag: RelTag,
432 48868 : version: Version<'_>,
433 48868 : ctx: &RequestContext,
434 48868 : ) -> Result<BlockNumber, PageReconstructError> {
435 48868 : if tag.relnode == 0 {
436 0 : return Err(PageReconstructError::Other(
437 0 : RelationError::InvalidRelnode.into(),
438 0 : ));
439 48868 : }
440 :
441 48868 : if let Some(nblocks) = self.get_cached_rel_size(&tag, version.get_lsn()) {
442 38588 : return Ok(nblocks);
443 10280 : }
444 10280 :
445 10280 : if (tag.forknum == FSM_FORKNUM || tag.forknum == VISIBILITYMAP_FORKNUM)
446 0 : && !self.get_rel_exists(tag, version, ctx).await?
447 : {
448 : // FIXME: Postgres sometimes calls smgrcreate() to create
449 : // FSM, and smgrnblocks() on it immediately afterwards,
450 : // without extending it. Tolerate that by claiming that
451 : // any non-existent FSM fork has size 0.
452 0 : return Ok(0);
453 10280 : }
454 10280 :
455 10280 : let key = rel_size_to_key(tag);
456 10280 : let mut buf = version.get(self, key, ctx).await?;
457 10272 : let nblocks = buf.get_u32_le();
458 10272 :
459 10272 : self.update_cached_rel_size(tag, version.get_lsn(), nblocks);
460 10272 :
461 10272 : Ok(nblocks)
462 48868 : }
463 :
464 : /// Does the relation exist?
465 : ///
466 : /// Only shard 0 has a full view of the relations. Other shards only know about relations that
467 : /// the shard stores pages for.
468 12100 : pub(crate) async fn get_rel_exists(
469 12100 : &self,
470 12100 : tag: RelTag,
471 12100 : version: Version<'_>,
472 12100 : ctx: &RequestContext,
473 12100 : ) -> Result<bool, PageReconstructError> {
474 12100 : if tag.relnode == 0 {
475 0 : return Err(PageReconstructError::Other(
476 0 : RelationError::InvalidRelnode.into(),
477 0 : ));
478 12100 : }
479 :
480 : // first try to lookup relation in cache
481 :         // first try to look up the relation in the cache
482 12064 : return Ok(true);
483 36 : }
484 : // then check if the database was already initialized.
485 : // get_rel_exists can be called before dbdir is created.
486 36 : let buf = version.get(self, DBDIR_KEY, ctx).await?;
487 36 : let dbdirs = DbDirectory::des(&buf)?.dbdirs;
488 36 : if !dbdirs.contains_key(&(tag.spcnode, tag.dbnode)) {
489 0 : return Ok(false);
490 36 : }
491 36 :
492 36 : // Read path: first read the new reldir keyspace. Early return if the relation exists.
493 36 : // Otherwise, read the old reldir keyspace.
494 36 : // TODO: if IndexPart::rel_size_migration is `Migrated`, we only need to read from v2.
495 36 :
496 36 : if let RelSizeMigration::Migrated | RelSizeMigration::Migrating =
497 36 : self.get_rel_size_v2_status()
498 : {
499 : // fetch directory listing (new)
500 0 : let key = rel_tag_sparse_key(tag.spcnode, tag.dbnode, tag.relnode, tag.forknum);
501 0 : let buf = RelDirExists::decode_option(version.sparse_get(self, key, ctx).await?)
502 0 : .map_err(|_| PageReconstructError::Other(anyhow::anyhow!("invalid reldir key")))?;
503 0 : let exists_v2 = buf == RelDirExists::Exists;
504 0 : // Fast path: if the relation exists in the new format, return true.
505 0 : // TODO: we should have a verification mode that checks both keyspaces
506 0 : // to ensure the relation only exists in one of them.
507 0 : if exists_v2 {
508 0 : return Ok(true);
509 0 : }
510 36 : }
511 :
512 : // fetch directory listing (old)
513 :
514 36 : let key = rel_dir_to_key(tag.spcnode, tag.dbnode);
515 36 : let buf = version.get(self, key, ctx).await?;
516 :
517 36 : let dir = RelDirectory::des(&buf)?;
518 36 : let exists_v1 = dir.rels.contains(&(tag.relnode, tag.forknum));
519 36 : Ok(exists_v1)
520 12100 : }
521 :
522 : /// Get a list of all existing relations in given tablespace and database.
523 : ///
524 : /// Only shard 0 has a full view of the relations. Other shards only know about relations that
525 : /// the shard stores pages for.
526 : ///
527 : /// # Cancel-Safety
528 : ///
529 : /// This method is cancellation-safe.
530 0 : pub(crate) async fn list_rels(
531 0 : &self,
532 0 : spcnode: Oid,
533 0 : dbnode: Oid,
534 0 : version: Version<'_>,
535 0 : ctx: &RequestContext,
536 0 : ) -> Result<HashSet<RelTag>, PageReconstructError> {
537 0 : // fetch directory listing (old)
538 0 : let key = rel_dir_to_key(spcnode, dbnode);
539 0 : let buf = version.get(self, key, ctx).await?;
540 :
541 0 : let dir = RelDirectory::des(&buf)?;
542 0 : let rels_v1: HashSet<RelTag> =
543 0 : HashSet::from_iter(dir.rels.iter().map(|(relnode, forknum)| RelTag {
544 0 : spcnode,
545 0 : dbnode,
546 0 : relnode: *relnode,
547 0 : forknum: *forknum,
548 0 : }));
549 0 :
550 0 : if let RelSizeMigration::Legacy = self.get_rel_size_v2_status() {
551 0 : return Ok(rels_v1);
552 0 : }
553 0 :
554 0 : // scan directory listing (new), merge with the old results
555 0 : let key_range = rel_tag_sparse_key_range(spcnode, dbnode);
556 0 : let io_concurrency = IoConcurrency::spawn_from_conf(
557 0 : self.conf,
558 0 : self.gate
559 0 : .enter()
560 0 : .map_err(|_| PageReconstructError::Cancelled)?,
561 : );
562 0 : let results = self
563 0 : .scan(
564 0 : KeySpace::single(key_range),
565 0 : version.get_lsn(),
566 0 : ctx,
567 0 : io_concurrency,
568 0 : )
569 0 : .await?;
570 0 : let mut rels = rels_v1;
571 0 : for (key, val) in results {
572 0 : let val = RelDirExists::decode(&val?)
573 0 : .map_err(|_| PageReconstructError::Other(anyhow::anyhow!("invalid reldir key")))?;
574 0 : assert_eq!(key.field6, 1);
575 0 : assert_eq!(key.field2, spcnode);
576 0 : assert_eq!(key.field3, dbnode);
577 0 : let tag = RelTag {
578 0 : spcnode,
579 0 : dbnode,
580 0 : relnode: key.field4,
581 0 : forknum: key.field5,
582 0 : };
583 0 : if val == RelDirExists::Removed {
584 0 : debug_assert!(!rels.contains(&tag), "removed reltag in v2");
585 0 : continue;
586 0 : }
587 0 : let did_not_contain = rels.insert(tag);
588 0 : debug_assert!(did_not_contain, "duplicate reltag in v2");
589 : }
590 0 : Ok(rels)
591 0 : }
592 :
593 : /// Get the whole SLRU segment
594 0 : pub(crate) async fn get_slru_segment(
595 0 : &self,
596 0 : kind: SlruKind,
597 0 : segno: u32,
598 0 : lsn: Lsn,
599 0 : ctx: &RequestContext,
600 0 : ) -> Result<Bytes, PageReconstructError> {
601 0 : assert!(self.tenant_shard_id.is_shard_zero());
602 0 : let n_blocks = self
603 0 : .get_slru_segment_size(kind, segno, Version::Lsn(lsn), ctx)
604 0 : .await?;
605 0 : let mut segment = BytesMut::with_capacity(n_blocks as usize * BLCKSZ as usize);
606 0 : for blkno in 0..n_blocks {
607 0 : let block = self
608 0 : .get_slru_page_at_lsn(kind, segno, blkno, lsn, ctx)
609 0 : .await?;
610 0 : segment.extend_from_slice(&block[..BLCKSZ as usize]);
611 : }
612 0 : Ok(segment.freeze())
613 0 : }
614 :
615 : /// Look up given SLRU page version.
616 0 : pub(crate) async fn get_slru_page_at_lsn(
617 0 : &self,
618 0 : kind: SlruKind,
619 0 : segno: u32,
620 0 : blknum: BlockNumber,
621 0 : lsn: Lsn,
622 0 : ctx: &RequestContext,
623 0 : ) -> Result<Bytes, PageReconstructError> {
624 0 : assert!(self.tenant_shard_id.is_shard_zero());
625 0 : let key = slru_block_to_key(kind, segno, blknum);
626 0 : self.get(key, lsn, ctx).await
627 0 : }
628 :
629 : /// Get size of an SLRU segment
630 0 : pub(crate) async fn get_slru_segment_size(
631 0 : &self,
632 0 : kind: SlruKind,
633 0 : segno: u32,
634 0 : version: Version<'_>,
635 0 : ctx: &RequestContext,
636 0 : ) -> Result<BlockNumber, PageReconstructError> {
637 0 : assert!(self.tenant_shard_id.is_shard_zero());
638 0 : let key = slru_segment_size_to_key(kind, segno);
639 0 : let mut buf = version.get(self, key, ctx).await?;
640 0 : Ok(buf.get_u32_le())
641 0 : }
642 :
643 :     /// Does the SLRU segment exist?
644 0 : pub(crate) async fn get_slru_segment_exists(
645 0 : &self,
646 0 : kind: SlruKind,
647 0 : segno: u32,
648 0 : version: Version<'_>,
649 0 : ctx: &RequestContext,
650 0 : ) -> Result<bool, PageReconstructError> {
651 0 : assert!(self.tenant_shard_id.is_shard_zero());
652 : // fetch directory listing
653 0 : let key = slru_dir_to_key(kind);
654 0 : let buf = version.get(self, key, ctx).await?;
655 :
656 0 : let dir = SlruSegmentDirectory::des(&buf)?;
657 0 : Ok(dir.segments.contains(&segno))
658 0 : }
659 :
660 :     /// Locate the LSN such that all transactions that committed before
661 : /// 'search_timestamp' are visible, but nothing newer is.
662 : ///
663 : /// This is not exact. Commit timestamps are not guaranteed to be ordered,
664 : /// so it's not well defined which LSN you get if there were multiple commits
665 : /// "in flight" at that point in time.
666 : ///
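     :     /// A sketch of how a caller might interpret the result (hypothetical
     :     /// `timeline`, `ts`, `cancel` and `ctx`):
     :     ///
     :     /// ```ignore
     :     /// match timeline.find_lsn_for_timestamp(ts, &cancel, &ctx).await? {
     :     ///     LsnForTimestamp::Present(lsn) => { /* commits on both sides; branch at `lsn` */ }
     :     ///     LsnForTimestamp::Future(lsn) => { /* all commits are older than `ts` */ }
     :     ///     LsnForTimestamp::Past(lsn) => { /* `ts` is older than the PITR horizon */ }
     :     ///     LsnForTimestamp::NoData(lsn) => { /* no commit timestamps at all, e.g. right after an import */ }
     :     /// }
     :     /// ```
     :     ///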
667 0 : pub(crate) async fn find_lsn_for_timestamp(
668 0 : &self,
669 0 : search_timestamp: TimestampTz,
670 0 : cancel: &CancellationToken,
671 0 : ctx: &RequestContext,
672 0 : ) -> Result<LsnForTimestamp, PageReconstructError> {
673 0 : pausable_failpoint!("find-lsn-for-timestamp-pausable");
674 :
675 0 : let gc_cutoff_lsn_guard = self.get_applied_gc_cutoff_lsn();
676 0 : let gc_cutoff_planned = {
677 0 : let gc_info = self.gc_info.read().unwrap();
678 0 : gc_info.min_cutoff()
679 0 : };
680 0 : // Usually the planned cutoff is newer than the cutoff of the last gc run,
681 0 : // but let's be defensive.
682 0 : let gc_cutoff = gc_cutoff_planned.max(*gc_cutoff_lsn_guard);
683 0 : // We use this method to figure out the branching LSN for the new branch, but the
684 0 : // GC cutoff could be before the branching point and we cannot create a new branch
685 0 : // with LSN < `ancestor_lsn`. Thus, pick the maximum of these two to be
686 0 : // on the safe side.
687 0 : let min_lsn = std::cmp::max(gc_cutoff, self.get_ancestor_lsn());
688 0 : let max_lsn = self.get_last_record_lsn();
689 0 :
690 0 : // LSNs are always 8-byte aligned. low/mid/high represent the
691 0 : // LSN divided by 8.
692 0 : let mut low = min_lsn.0 / 8;
693 0 : let mut high = max_lsn.0 / 8 + 1;
694 0 :
695 0 : let mut found_smaller = false;
696 0 : let mut found_larger = false;
697 :
698 0 : while low < high {
699 0 : if cancel.is_cancelled() {
700 0 : return Err(PageReconstructError::Cancelled);
701 0 : }
702 0 : // cannot overflow, high and low are both smaller than u64::MAX / 2
703 0 : let mid = (high + low) / 2;
704 :
705 0 : let cmp = match self
706 0 : .is_latest_commit_timestamp_ge_than(
707 0 : search_timestamp,
708 0 : Lsn(mid * 8),
709 0 : &mut found_smaller,
710 0 : &mut found_larger,
711 0 : ctx,
712 0 : )
713 0 : .await
714 : {
715 0 : Ok(res) => res,
716 0 : Err(PageReconstructError::MissingKey(e)) => {
717 0 : warn!(
718 0 : "Missing key while find_lsn_for_timestamp. Either we might have already garbage-collected that data or the key is really missing. Last error: {:#}",
719 : e
720 : );
721 :                 // Return that we didn't find any commits smaller than the LSN; the error was logged above.
722 0 : return Ok(LsnForTimestamp::Past(min_lsn));
723 : }
724 0 : Err(e) => return Err(e),
725 : };
726 :
727 0 : if cmp {
728 0 : high = mid;
729 0 : } else {
730 0 : low = mid + 1;
731 0 : }
732 : }
733 :
734 : // If `found_smaller == true`, `low = t + 1` where `t` is the target LSN,
735 : // so the LSN of the last commit record before or at `search_timestamp`.
736 : // Remove one from `low` to get `t`.
737 : //
738 : // FIXME: it would be better to get the LSN of the previous commit.
739 : // Otherwise, if you restore to the returned LSN, the database will
740 : // include physical changes from later commits that will be marked
741 : // as aborted, and will need to be vacuumed away.
742 0 : let commit_lsn = Lsn((low - 1) * 8);
743 0 : match (found_smaller, found_larger) {
744 : (false, false) => {
745 : // This can happen if no commit records have been processed yet, e.g.
746 : // just after importing a cluster.
747 0 : Ok(LsnForTimestamp::NoData(min_lsn))
748 : }
749 : (false, true) => {
750 : // Didn't find any commit timestamps smaller than the request
751 0 : Ok(LsnForTimestamp::Past(min_lsn))
752 : }
753 0 : (true, _) if commit_lsn < min_lsn => {
754 0 : // the search above did set found_smaller to true but it never increased the lsn.
755 0 : // Then, low is still the old min_lsn, and the subtraction above gave a value
756 0 : // below the min_lsn. We should never do that.
757 0 : Ok(LsnForTimestamp::Past(min_lsn))
758 : }
759 : (true, false) => {
760 : // Only found commits with timestamps smaller than the request.
761 : // It's still a valid case for branch creation, return it.
762 : // And `update_gc_info()` ignores LSN for a `LsnForTimestamp::Future`
763 : // case, anyway.
764 0 : Ok(LsnForTimestamp::Future(commit_lsn))
765 : }
766 0 : (true, true) => Ok(LsnForTimestamp::Present(commit_lsn)),
767 : }
768 0 : }
769 :
770 : /// Subroutine of find_lsn_for_timestamp(). Returns true, if there are any
771 :     /// Subroutine of find_lsn_for_timestamp(). Returns true if there are any
772 :     /// commits that committed after 'search_timestamp', at LSN 'probe_lsn'.
773 :     ///
774 :     /// Additionally, sets 'found_smaller'/'found_larger' if it encounters any commits
775 :     /// with a smaller/larger timestamp.
776 0 : pub(crate) async fn is_latest_commit_timestamp_ge_than(
777 0 : &self,
778 0 : search_timestamp: TimestampTz,
779 0 : probe_lsn: Lsn,
780 0 : found_smaller: &mut bool,
781 0 : found_larger: &mut bool,
782 0 : ctx: &RequestContext,
783 0 : ) -> Result<bool, PageReconstructError> {
784 0 : self.map_all_timestamps(probe_lsn, ctx, |timestamp| {
785 0 : if timestamp >= search_timestamp {
786 0 : *found_larger = true;
787 0 : return ControlFlow::Break(true);
788 0 : } else {
789 0 : *found_smaller = true;
790 0 : }
791 0 : ControlFlow::Continue(())
792 0 : })
793 0 : .await
794 0 : }
795 :
796 :     /// Obtain the latest commit timestamp at the given lsn.
797 :     ///
798 :     /// If the lsn has no commit timestamps, returns None; otherwise returns the largest timestamp found.
799 0 : pub(crate) async fn get_timestamp_for_lsn(
800 0 : &self,
801 0 : probe_lsn: Lsn,
802 0 : ctx: &RequestContext,
803 0 : ) -> Result<Option<TimestampTz>, PageReconstructError> {
804 0 : let mut max: Option<TimestampTz> = None;
805 0 : self.map_all_timestamps::<()>(probe_lsn, ctx, |timestamp| {
806 0 : if let Some(max_prev) = max {
807 0 : max = Some(max_prev.max(timestamp));
808 0 : } else {
809 0 : max = Some(timestamp);
810 0 : }
811 0 : ControlFlow::Continue(())
812 0 : })
813 0 : .await?;
814 :
815 0 : Ok(max)
816 0 : }
817 :
818 :     /// Runs the given function on all the timestamps for a given lsn.
819 : ///
820 : /// The return value is either given by the closure, or set to the `Default`
821 : /// impl's output.
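     :     ///
     :     /// The closure follows the usual `ControlFlow` contract: return
     :     /// `ControlFlow::Break(value)` to stop scanning and yield `value`, or
     :     /// `ControlFlow::Continue(())` to keep visiting the remaining timestamps.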
822 0 : async fn map_all_timestamps<T: Default>(
823 0 : &self,
824 0 : probe_lsn: Lsn,
825 0 : ctx: &RequestContext,
826 0 : mut f: impl FnMut(TimestampTz) -> ControlFlow<T>,
827 0 : ) -> Result<T, PageReconstructError> {
828 0 : for segno in self
829 0 : .list_slru_segments(SlruKind::Clog, Version::Lsn(probe_lsn), ctx)
830 0 : .await?
831 : {
832 0 : let nblocks = self
833 0 : .get_slru_segment_size(SlruKind::Clog, segno, Version::Lsn(probe_lsn), ctx)
834 0 : .await?;
835 0 : for blknum in (0..nblocks).rev() {
836 0 : let clog_page = self
837 0 : .get_slru_page_at_lsn(SlruKind::Clog, segno, blknum, probe_lsn, ctx)
838 0 : .await?;
839 :
840 0 : if clog_page.len() == BLCKSZ as usize + 8 {
841 0 : let mut timestamp_bytes = [0u8; 8];
842 0 : timestamp_bytes.copy_from_slice(&clog_page[BLCKSZ as usize..]);
843 0 : let timestamp = TimestampTz::from_be_bytes(timestamp_bytes);
844 0 :
845 0 : match f(timestamp) {
846 0 : ControlFlow::Break(b) => return Ok(b),
847 0 : ControlFlow::Continue(()) => (),
848 : }
849 0 : }
850 : }
851 : }
852 0 : Ok(Default::default())
853 0 : }
854 :
855 0 : pub(crate) async fn get_slru_keyspace(
856 0 : &self,
857 0 : version: Version<'_>,
858 0 : ctx: &RequestContext,
859 0 : ) -> Result<KeySpace, PageReconstructError> {
860 0 : let mut accum = KeySpaceAccum::new();
861 :
862 0 : for kind in SlruKind::iter() {
863 0 : let mut segments: Vec<u32> = self
864 0 : .list_slru_segments(kind, version, ctx)
865 0 : .await?
866 0 : .into_iter()
867 0 : .collect();
868 0 : segments.sort_unstable();
869 :
870 0 : for seg in segments {
871 0 : let block_count = self.get_slru_segment_size(kind, seg, version, ctx).await?;
872 :
873 0 : accum.add_range(
874 0 : slru_block_to_key(kind, seg, 0)..slru_block_to_key(kind, seg, block_count),
875 0 : );
876 : }
877 : }
878 :
879 0 : Ok(accum.to_keyspace())
880 0 : }
881 :
882 : /// Get a list of SLRU segments
883 0 : pub(crate) async fn list_slru_segments(
884 0 : &self,
885 0 : kind: SlruKind,
886 0 : version: Version<'_>,
887 0 : ctx: &RequestContext,
888 0 : ) -> Result<HashSet<u32>, PageReconstructError> {
889 0 : // fetch directory entry
890 0 : let key = slru_dir_to_key(kind);
891 :
892 0 : let buf = version.get(self, key, ctx).await?;
893 0 : Ok(SlruSegmentDirectory::des(&buf)?.segments)
894 0 : }
895 :
896 0 : pub(crate) async fn get_relmap_file(
897 0 : &self,
898 0 : spcnode: Oid,
899 0 : dbnode: Oid,
900 0 : version: Version<'_>,
901 0 : ctx: &RequestContext,
902 0 : ) -> Result<Bytes, PageReconstructError> {
903 0 : let key = relmap_file_key(spcnode, dbnode);
904 :
905 0 : let buf = version.get(self, key, ctx).await?;
906 0 : Ok(buf)
907 0 : }
908 :
909 652 : pub(crate) async fn list_dbdirs(
910 652 : &self,
911 652 : lsn: Lsn,
912 652 : ctx: &RequestContext,
913 652 : ) -> Result<HashMap<(Oid, Oid), bool>, PageReconstructError> {
914 : // fetch directory entry
915 652 : let buf = self.get(DBDIR_KEY, lsn, ctx).await?;
916 :
917 652 : Ok(DbDirectory::des(&buf)?.dbdirs)
918 652 : }
919 :
920 0 : pub(crate) async fn get_twophase_file(
921 0 : &self,
922 0 : xid: u64,
923 0 : lsn: Lsn,
924 0 : ctx: &RequestContext,
925 0 : ) -> Result<Bytes, PageReconstructError> {
926 0 : let key = twophase_file_key(xid);
927 0 : let buf = self.get(key, lsn, ctx).await?;
928 0 : Ok(buf)
929 0 : }
930 :
931 656 : pub(crate) async fn list_twophase_files(
932 656 : &self,
933 656 : lsn: Lsn,
934 656 : ctx: &RequestContext,
935 656 : ) -> Result<HashSet<u64>, PageReconstructError> {
936 : // fetch directory entry
937 656 : let buf = self.get(TWOPHASEDIR_KEY, lsn, ctx).await?;
938 :
939 656 : if self.pg_version >= 17 {
940 0 : Ok(TwoPhaseDirectoryV17::des(&buf)?.xids)
941 : } else {
942 656 : Ok(TwoPhaseDirectory::des(&buf)?
943 : .xids
944 656 : .iter()
945 656 : .map(|x| u64::from(*x))
946 656 : .collect())
947 : }
948 656 : }
949 :
950 0 : pub(crate) async fn get_control_file(
951 0 : &self,
952 0 : lsn: Lsn,
953 0 : ctx: &RequestContext,
954 0 : ) -> Result<Bytes, PageReconstructError> {
955 0 : self.get(CONTROLFILE_KEY, lsn, ctx).await
956 0 : }
957 :
958 24 : pub(crate) async fn get_checkpoint(
959 24 : &self,
960 24 : lsn: Lsn,
961 24 : ctx: &RequestContext,
962 24 : ) -> Result<Bytes, PageReconstructError> {
963 24 : self.get(CHECKPOINT_KEY, lsn, ctx).await
964 24 : }
965 :
966 24 : async fn list_aux_files_v2(
967 24 : &self,
968 24 : lsn: Lsn,
969 24 : ctx: &RequestContext,
970 24 : io_concurrency: IoConcurrency,
971 24 : ) -> Result<HashMap<String, Bytes>, PageReconstructError> {
972 24 : let kv = self
973 24 : .scan(
974 24 : KeySpace::single(Key::metadata_aux_key_range()),
975 24 : lsn,
976 24 : ctx,
977 24 : io_concurrency,
978 24 : )
979 24 : .await?;
980 24 : let mut result = HashMap::new();
981 24 : let mut sz = 0;
982 60 : for (_, v) in kv {
983 36 : let v = v?;
984 36 : let v = aux_file::decode_file_value_bytes(&v)
985 36 : .context("value decode")
986 36 : .map_err(PageReconstructError::Other)?;
987 68 : for (fname, content) in v {
988 32 : sz += fname.len();
989 32 : sz += content.len();
990 32 : result.insert(fname, content);
991 32 : }
992 : }
993 24 : self.aux_file_size_estimator.on_initial(sz);
994 24 : Ok(result)
995 24 : }
996 :
997 0 : pub(crate) async fn trigger_aux_file_size_computation(
998 0 : &self,
999 0 : lsn: Lsn,
1000 0 : ctx: &RequestContext,
1001 0 : io_concurrency: IoConcurrency,
1002 0 : ) -> Result<(), PageReconstructError> {
1003 0 : self.list_aux_files_v2(lsn, ctx, io_concurrency).await?;
1004 0 : Ok(())
1005 0 : }
1006 :
1007 24 : pub(crate) async fn list_aux_files(
1008 24 : &self,
1009 24 : lsn: Lsn,
1010 24 : ctx: &RequestContext,
1011 24 : io_concurrency: IoConcurrency,
1012 24 : ) -> Result<HashMap<String, Bytes>, PageReconstructError> {
1013 24 : self.list_aux_files_v2(lsn, ctx, io_concurrency).await
1014 24 : }
1015 :
1016 0 : pub(crate) async fn get_replorigins(
1017 0 : &self,
1018 0 : lsn: Lsn,
1019 0 : ctx: &RequestContext,
1020 0 : io_concurrency: IoConcurrency,
1021 0 : ) -> Result<HashMap<RepOriginId, Lsn>, PageReconstructError> {
1022 0 : let kv = self
1023 0 : .scan(
1024 0 : KeySpace::single(repl_origin_key_range()),
1025 0 : lsn,
1026 0 : ctx,
1027 0 : io_concurrency,
1028 0 : )
1029 0 : .await?;
1030 0 : let mut result = HashMap::new();
1031 0 : for (k, v) in kv {
1032 0 : let v = v?;
1033 0 : let origin_id = k.field6 as RepOriginId;
1034 0 : let origin_lsn = Lsn::des(&v).unwrap();
1035 0 : if origin_lsn != Lsn::INVALID {
1036 0 : result.insert(origin_id, origin_lsn);
1037 0 : }
1038 : }
1039 0 : Ok(result)
1040 0 : }
1041 :
1042 :     /// Does the same as get_current_logical_size but computes it on demand.
1043 : /// Used to initialize the logical size tracking on startup.
1044 : ///
1045 : /// Only relation blocks are counted currently. That excludes metadata,
1046 : /// SLRUs, twophase files etc.
1047 : ///
1048 : /// # Cancel-Safety
1049 : ///
1050 : /// This method is cancellation-safe.
1051 0 : pub(crate) async fn get_current_logical_size_non_incremental(
1052 0 : &self,
1053 0 : lsn: Lsn,
1054 0 : ctx: &RequestContext,
1055 0 : ) -> Result<u64, CalculateLogicalSizeError> {
1056 0 : debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id();
1057 0 :
1058 0 : fail::fail_point!("skip-logical-size-calculation", |_| { Ok(0) });
1059 :
1060 : // Fetch list of database dirs and iterate them
1061 0 : let buf = self.get(DBDIR_KEY, lsn, ctx).await?;
1062 0 : let dbdir = DbDirectory::des(&buf)?;
1063 :
1064 0 : let mut total_size: u64 = 0;
1065 0 : for (spcnode, dbnode) in dbdir.dbdirs.keys() {
1066 0 : for rel in self
1067 0 : .list_rels(*spcnode, *dbnode, Version::Lsn(lsn), ctx)
1068 0 : .await?
1069 : {
1070 0 : if self.cancel.is_cancelled() {
1071 0 : return Err(CalculateLogicalSizeError::Cancelled);
1072 0 : }
1073 0 : let relsize_key = rel_size_to_key(rel);
1074 0 : let mut buf = self.get(relsize_key, lsn, ctx).await?;
1075 0 : let relsize = buf.get_u32_le();
1076 0 :
1077 0 : total_size += relsize as u64;
1078 : }
1079 : }
1080 0 : Ok(total_size * BLCKSZ as u64)
1081 0 : }
1082 :
1083 : /// Get a KeySpace that covers all the Keys that are in use at AND below the given LSN. This is only used
1084 : /// for gc-compaction.
1085 : ///
1086 : /// gc-compaction cannot use the same `collect_keyspace` function as the legacy compaction because it
1087 : /// processes data at multiple LSNs and needs to be aware of the fact that some key ranges might need to
1088 : /// be kept only for a specific range of LSN.
1089 : ///
1090 : /// Consider the case that the user created branches at LSN 10 and 20, where the user created a table A at
1091 : /// LSN 10 and dropped that table at LSN 20. `collect_keyspace` at LSN 10 will return the key range
1092 : /// corresponding to that table, while LSN 20 won't. The keyspace info at a single LSN is not enough to
1093 : /// determine which keys to retain/drop for gc-compaction.
1094 : ///
1095 : /// For now, it only drops AUX-v1 keys. But in the future, the function will be extended to return the keyspace
1096 :     /// to be retained for each of the branch LSNs.
1097 : ///
1098 : /// The return value is (dense keyspace, sparse keyspace).
1099 104 : pub(crate) async fn collect_gc_compaction_keyspace(
1100 104 : &self,
1101 104 : ) -> Result<(KeySpace, SparseKeySpace), CollectKeySpaceError> {
1102 104 : let metadata_key_begin = Key::metadata_key_range().start;
1103 104 : let aux_v1_key = AUX_FILES_KEY;
1104 104 : let dense_keyspace = KeySpace {
1105 104 : ranges: vec![Key::MIN..aux_v1_key, aux_v1_key.next()..metadata_key_begin],
1106 104 : };
1107 104 : Ok((
1108 104 : dense_keyspace,
1109 104 : SparseKeySpace(KeySpace::single(Key::metadata_key_range())),
1110 104 : ))
1111 104 : }
1112 :
1113 : ///
1114 : /// Get a KeySpace that covers all the Keys that are in use at the given LSN.
1115 :     /// Anything that's not listed may be removed from the underlying storage (from
1116 : /// that LSN forwards).
1117 : ///
1118 : /// The return value is (dense keyspace, sparse keyspace).
1119 652 : pub(crate) async fn collect_keyspace(
1120 652 : &self,
1121 652 : lsn: Lsn,
1122 652 : ctx: &RequestContext,
1123 652 : ) -> Result<(KeySpace, SparseKeySpace), CollectKeySpaceError> {
1124 652 : // Iterate through key ranges, greedily packing them into partitions
1125 652 : let mut result = KeySpaceAccum::new();
1126 652 :
1127 652 : // The dbdir metadata always exists
1128 652 : result.add_key(DBDIR_KEY);
1129 :
1130 : // Fetch list of database dirs and iterate them
1131 652 : let dbdir = self.list_dbdirs(lsn, ctx).await?;
1132 652 : let mut dbs: Vec<((Oid, Oid), bool)> = dbdir.into_iter().collect();
1133 652 :
1134 652 : dbs.sort_unstable_by(|(k_a, _), (k_b, _)| k_a.cmp(k_b));
1135 652 : for ((spcnode, dbnode), has_relmap_file) in dbs {
1136 0 : if has_relmap_file {
1137 0 : result.add_key(relmap_file_key(spcnode, dbnode));
1138 0 : }
1139 0 : result.add_key(rel_dir_to_key(spcnode, dbnode));
1140 :
1141 0 : let mut rels: Vec<RelTag> = self
1142 0 : .list_rels(spcnode, dbnode, Version::Lsn(lsn), ctx)
1143 0 : .await?
1144 0 : .into_iter()
1145 0 : .collect();
1146 0 : rels.sort_unstable();
1147 0 : for rel in rels {
1148 0 : let relsize_key = rel_size_to_key(rel);
1149 0 : let mut buf = self.get(relsize_key, lsn, ctx).await?;
1150 0 : let relsize = buf.get_u32_le();
1151 0 :
1152 0 : result.add_range(rel_block_to_key(rel, 0)..rel_block_to_key(rel, relsize));
1153 0 : result.add_key(relsize_key);
1154 : }
1155 : }
1156 :
1157 : // Iterate SLRUs next
1158 652 : if self.tenant_shard_id.is_shard_zero() {
1159 1920 : for kind in [
1160 640 : SlruKind::Clog,
1161 640 : SlruKind::MultiXactMembers,
1162 640 : SlruKind::MultiXactOffsets,
1163 : ] {
1164 1920 : let slrudir_key = slru_dir_to_key(kind);
1165 1920 : result.add_key(slrudir_key);
1166 1920 : let buf = self.get(slrudir_key, lsn, ctx).await?;
1167 1920 : let dir = SlruSegmentDirectory::des(&buf)?;
1168 1920 : let mut segments: Vec<u32> = dir.segments.iter().cloned().collect();
1169 1920 : segments.sort_unstable();
1170 1920 : for segno in segments {
1171 0 : let segsize_key = slru_segment_size_to_key(kind, segno);
1172 0 : let mut buf = self.get(segsize_key, lsn, ctx).await?;
1173 0 : let segsize = buf.get_u32_le();
1174 0 :
1175 0 : result.add_range(
1176 0 : slru_block_to_key(kind, segno, 0)..slru_block_to_key(kind, segno, segsize),
1177 0 : );
1178 0 : result.add_key(segsize_key);
1179 : }
1180 : }
1181 12 : }
1182 :
1183 : // Then pg_twophase
1184 652 : result.add_key(TWOPHASEDIR_KEY);
1185 :
1186 652 : let mut xids: Vec<u64> = self
1187 652 : .list_twophase_files(lsn, ctx)
1188 652 : .await?
1189 652 : .iter()
1190 652 : .cloned()
1191 652 : .collect();
1192 652 : xids.sort_unstable();
1193 652 : for xid in xids {
1194 0 : result.add_key(twophase_file_key(xid));
1195 0 : }
1196 :
1197 652 : result.add_key(CONTROLFILE_KEY);
1198 652 : result.add_key(CHECKPOINT_KEY);
1199 652 :
1200 652 : // Add extra keyspaces in the test cases. Some test cases write keys into the storage without
1201 652 : // creating directory keys. These test cases will add such keyspaces into `extra_test_dense_keyspace`
1202 652 :         // and the keys will not be garbage-collected.
1203 652 : #[cfg(test)]
1204 652 : {
1205 652 : let guard = self.extra_test_dense_keyspace.load();
1206 652 : for kr in &guard.ranges {
1207 0 : result.add_range(kr.clone());
1208 0 : }
1209 0 : }
1210 0 :
1211 652 : let dense_keyspace = result.to_keyspace();
1212 652 : let sparse_keyspace = SparseKeySpace(KeySpace {
1213 652 : ranges: vec![
1214 652 : Key::metadata_aux_key_range(),
1215 652 : repl_origin_key_range(),
1216 652 : Key::rel_dir_sparse_key_range(),
1217 652 : ],
1218 652 : });
1219 652 :
1220 652 : if cfg!(debug_assertions) {
1221 : // Verify if the sparse keyspaces are ordered and non-overlapping.
1222 :
1223 : // We do not use KeySpaceAccum for sparse_keyspace because we want to ensure each
1224 : // category of sparse keys are split into their own image/delta files. If there
1225 : // are overlapping keyspaces, they will be automatically merged by keyspace accum,
1226 : // and we want the developer to keep the keyspaces separated.
1227 :
1228 652 : let ranges = &sparse_keyspace.0.ranges;
1229 :
1230 : // TODO: use a single overlaps_with across the codebase
1231 1956 : fn overlaps_with<T: Ord>(a: &Range<T>, b: &Range<T>) -> bool {
1232 1956 : !(a.end <= b.start || b.end <= a.start)
1233 1956 : }
1234 1956 : for i in 0..ranges.len() {
1235 1956 : for j in 0..i {
1236 1956 : if overlaps_with(&ranges[i], &ranges[j]) {
1237 0 : panic!(
1238 0 : "overlapping sparse keyspace: {}..{} and {}..{}",
1239 0 : ranges[i].start, ranges[i].end, ranges[j].start, ranges[j].end
1240 0 : );
1241 1956 : }
1242 : }
1243 : }
1244 1304 : for i in 1..ranges.len() {
1245 1304 : assert!(
1246 1304 : ranges[i - 1].end <= ranges[i].start,
1247 0 : "unordered sparse keyspace: {}..{} and {}..{}",
1248 0 : ranges[i - 1].start,
1249 0 : ranges[i - 1].end,
1250 0 : ranges[i].start,
1251 0 : ranges[i].end
1252 : );
1253 : }
1254 0 : }
1255 :
1256 652 : Ok((dense_keyspace, sparse_keyspace))
1257 652 : }
1258 :
1259 :     /// Get the cached size of a relation, if it was not updated after the specified LSN
1260 897080 : pub fn get_cached_rel_size(&self, tag: &RelTag, lsn: Lsn) -> Option<BlockNumber> {
1261 897080 : let rel_size_cache = self.rel_size_cache.read().unwrap();
1262 897080 : if let Some((cached_lsn, nblocks)) = rel_size_cache.map.get(tag) {
1263 897036 : if lsn >= *cached_lsn {
1264 886744 : RELSIZE_CACHE_HITS.inc();
1265 886744 : return Some(*nblocks);
1266 10292 : }
1267 10292 : RELSIZE_CACHE_MISSES_OLD.inc();
1268 44 : }
1269 10336 : RELSIZE_CACHE_MISSES.inc();
1270 10336 : None
1271 897080 : }
1272 :
1273 : /// Update cached relation size if there is no more recent update
1274 10272 : pub fn update_cached_rel_size(&self, tag: RelTag, lsn: Lsn, nblocks: BlockNumber) {
1275 10272 : let mut rel_size_cache = self.rel_size_cache.write().unwrap();
1276 10272 :
1277 10272 : if lsn < rel_size_cache.complete_as_of {
1278 : // Do not cache old values. It's safe to cache the size on read, as long as
1279 : // the read was at an LSN since we started the WAL ingestion. Reasoning: we
1280 : // never evict values from the cache, so if the relation size changed after
1281 : // 'lsn', the new value is already in the cache.
1282 0 : return;
1283 10272 : }
1284 10272 :
1285 10272 : match rel_size_cache.map.entry(tag) {
1286 10272 : hash_map::Entry::Occupied(mut entry) => {
1287 10272 : let cached_lsn = entry.get_mut();
1288 10272 : if lsn >= cached_lsn.0 {
1289 0 : *cached_lsn = (lsn, nblocks);
1290 10272 : }
1291 : }
1292 0 : hash_map::Entry::Vacant(entry) => {
1293 0 : entry.insert((lsn, nblocks));
1294 0 : RELSIZE_CACHE_ENTRIES.inc();
1295 0 : }
1296 : }
1297 10272 : }
1298 :
1299 : /// Store cached relation size
1300 565440 : pub fn set_cached_rel_size(&self, tag: RelTag, lsn: Lsn, nblocks: BlockNumber) {
1301 565440 : let mut rel_size_cache = self.rel_size_cache.write().unwrap();
1302 565440 : if rel_size_cache.map.insert(tag, (lsn, nblocks)).is_none() {
1303 3840 : RELSIZE_CACHE_ENTRIES.inc();
1304 561600 : }
1305 565440 : }
1306 :
1307 : /// Remove cached relation size
1308 4 : pub fn remove_cached_rel_size(&self, tag: &RelTag) {
1309 4 : let mut rel_size_cache = self.rel_size_cache.write().unwrap();
1310 4 : if rel_size_cache.map.remove(tag).is_some() {
1311 4 : RELSIZE_CACHE_ENTRIES.dec();
1312 4 : }
1313 4 : }
1314 : }
1315 :
1316 : /// DatadirModification represents an operation to ingest an atomic set of
1317 : /// updates to the repository.
1318 : ///
1319 : /// It is created by the 'begin_modification' function. It is used for each WAL
1320 : /// record, so that all the modifications by one WAL record appear atomic.
1321 : pub struct DatadirModification<'a> {
1322 : /// The timeline this modification applies to. You can access this to
1323 : /// read the state, but note that any pending updates are *not* reflected
1324 : /// in the state in 'tline' yet.
1325 : pub tline: &'a Timeline,
1326 :
1327 : /// Current LSN of the modification
1328 : lsn: Lsn,
1329 :
1330 : // The modifications are not applied directly to the underlying key-value store.
1331 : // The put-functions add the modifications here, and they are flushed to the
1332 : // underlying key-value store by the 'finish' function.
1333 : pending_lsns: Vec<Lsn>,
1334 : pending_deletions: Vec<(Range<Key>, Lsn)>,
1335 : pending_nblocks: i64,
1336 :
1337 : /// Metadata writes, indexed by key so that they can be read from not-yet-committed modifications
1338 : /// while ingesting subsequent records. See [`Self::is_data_key`] for the definition of 'metadata'.
1339 : pending_metadata_pages: HashMap<CompactKey, Vec<(Lsn, usize, Value)>>,
1340 :
1341 : /// Data writes, ready to be flushed into an ephemeral layer. See [`Self::is_data_key`] for
1342 : /// which keys are stored here.
1343 : pending_data_batch: Option<SerializedValueBatch>,
1344 :
1345 : /// For special "directory" keys that store key-value maps, track the size of the map
1346 : /// if it was updated in this modification.
1347 : pending_directory_entries: Vec<(DirectoryKind, MetricsUpdate)>,
1348 :
1349 : /// An **approximation** of how many metadata bytes will be written to the EphemeralFile.
1350 : pending_metadata_bytes: usize,
1351 : }
1352 :
1353 : #[derive(Debug, Clone, Copy, PartialEq, Eq)]
1354 : pub enum MetricsUpdate {
1355 :     /// Set the metric to this value
1356 :     Set(u64),
1357 :     /// Increment the metric by this value
1358 :     Add(u64),
1359 :     /// Decrement the metric by this value
1360 : Sub(u64),
1361 : }
1362 :
1363 : impl DatadirModification<'_> {
1364 : // When a DatadirModification is committed, we do a monolithic serialization of all its contents. WAL records can
1365 : // contain multiple pages, so the pageserver's record-based batch size isn't sufficient to bound this allocation: we
1366 : // additionally specify a limit on how much payload a DatadirModification may contain before it should be committed.
1367 : pub(crate) const MAX_PENDING_BYTES: usize = 8 * 1024 * 1024;
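     :     // A sketch of the intended use (an assumed caller pattern, not enforced
     :     // here): an ingest loop compares `approx_pending_bytes()` against this
     :     // limit and commits the modification once it is exceeded.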
1368 :
1369 : /// Get the current lsn
1370 836116 : pub(crate) fn get_lsn(&self) -> Lsn {
1371 836116 : self.lsn
1372 836116 : }
1373 :
1374 0 : pub(crate) fn approx_pending_bytes(&self) -> usize {
1375 0 : self.pending_data_batch
1376 0 : .as_ref()
1377 0 : .map_or(0, |b| b.buffer_size())
1378 0 : + self.pending_metadata_bytes
1379 0 : }
1380 :
1381 0 : pub(crate) fn has_dirty_data(&self) -> bool {
1382 0 : self.pending_data_batch
1383 0 : .as_ref()
1384 0 : .is_some_and(|b| b.has_data())
1385 0 : }
1386 :
1387 : /// Returns statistics about the currently pending modifications.
1388 0 : pub(crate) fn stats(&self) -> DatadirModificationStats {
1389 0 : let mut stats = DatadirModificationStats::default();
1390 0 : for (_, _, value) in self.pending_metadata_pages.values().flatten() {
1391 0 : match value {
1392 0 : Value::Image(_) => stats.metadata_images += 1,
1393 0 : Value::WalRecord(r) if r.will_init() => stats.metadata_images += 1,
1394 0 : Value::WalRecord(_) => stats.metadata_deltas += 1,
1395 : }
1396 : }
1397 0 : for valuemeta in self.pending_data_batch.iter().flat_map(|b| &b.metadata) {
1398 0 : match valuemeta {
1399 0 : ValueMeta::Serialized(s) if s.will_init => stats.data_images += 1,
1400 0 : ValueMeta::Serialized(_) => stats.data_deltas += 1,
1401 0 : ValueMeta::Observed(_) => {}
1402 : }
1403 : }
1404 0 : stats
1405 0 : }
1406 :
1407 : /// Set the current lsn
1408 291716 : pub(crate) fn set_lsn(&mut self, lsn: Lsn) -> anyhow::Result<()> {
1409 291716 : ensure!(
1410 291716 : lsn >= self.lsn,
1411 0 : "setting an older lsn {} than {} is not allowed",
1412 : lsn,
1413 : self.lsn
1414 : );
1415 :
1416 291716 : if lsn > self.lsn {
1417 291716 : self.pending_lsns.push(self.lsn);
1418 291716 : self.lsn = lsn;
1419 291716 : }
1420 291716 : Ok(())
1421 291716 : }
1422 :
1423 : /// In this context, 'metadata' means keys that are only read by the pageserver internally, and 'data' means
1424 : /// keys that represent literal blocks that postgres can read. So data includes relation blocks and
1425 : /// SLRU blocks, which are read directly by postgres, and everything else is considered metadata.
1426 : ///
1427 : /// The distinction is important because data keys are handled on a fast path where dirty writes are
1428 : /// not readable until this modification is committed, whereas metadata keys are visible for read
1429 : /// via [`Self::get`] as soon as their record has been ingested.
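     :     ///
     :     /// For example, keys produced by `rel_block_to_key` or `slru_block_to_key`
     :     /// are data keys, while `DBDIR_KEY`, relation size keys and directory
     :     /// listings are metadata keys.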
1430 1701304 : fn is_data_key(key: &Key) -> bool {
1431 1701304 : key.is_rel_block_key() || key.is_slru_block_key()
1432 1701304 : }
1433 :
1434 : /// Initialize a completely new repository.
1435 : ///
1436 : /// This inserts the directory metadata entries that are assumed to
1437 : /// always exist.
1438 424 : pub fn init_empty(&mut self) -> anyhow::Result<()> {
1439 424 : let buf = DbDirectory::ser(&DbDirectory {
1440 424 : dbdirs: HashMap::new(),
1441 424 : })?;
1442 424 : self.pending_directory_entries
1443 424 : .push((DirectoryKind::Db, MetricsUpdate::Set(0)));
1444 424 : self.put(DBDIR_KEY, Value::Image(buf.into()));
1445 :
1446 424 : let buf = if self.tline.pg_version >= 17 {
1447 0 : TwoPhaseDirectoryV17::ser(&TwoPhaseDirectoryV17 {
1448 0 : xids: HashSet::new(),
1449 0 : })
1450 : } else {
1451 424 : TwoPhaseDirectory::ser(&TwoPhaseDirectory {
1452 424 : xids: HashSet::new(),
1453 424 : })
1454 0 : }?;
1455 424 : self.pending_directory_entries
1456 424 : .push((DirectoryKind::TwoPhase, MetricsUpdate::Set(0)));
1457 424 : self.put(TWOPHASEDIR_KEY, Value::Image(buf.into()));
1458 :
1459 424 : let buf: Bytes = SlruSegmentDirectory::ser(&SlruSegmentDirectory::default())?.into();
1460 424 : let empty_dir = Value::Image(buf);
1461 424 :
1462 424 : // Initialize SLRUs on shard 0 only: creating these on other shards would be
1463 424 : // harmless but they'd just be dropped on later compaction.
1464 424 : if self.tline.tenant_shard_id.is_shard_zero() {
1465 412 : self.put(slru_dir_to_key(SlruKind::Clog), empty_dir.clone());
1466 412 : self.pending_directory_entries.push((
1467 412 : DirectoryKind::SlruSegment(SlruKind::Clog),
1468 412 : MetricsUpdate::Set(0),
1469 412 : ));
1470 412 : self.put(
1471 412 : slru_dir_to_key(SlruKind::MultiXactMembers),
1472 412 : empty_dir.clone(),
1473 412 : );
1474 412 : self.pending_directory_entries.push((
1475 412 :                 DirectoryKind::SlruSegment(SlruKind::MultiXactMembers),
1476 412 : MetricsUpdate::Set(0),
1477 412 : ));
1478 412 : self.put(slru_dir_to_key(SlruKind::MultiXactOffsets), empty_dir);
1479 412 : self.pending_directory_entries.push((
1480 412 : DirectoryKind::SlruSegment(SlruKind::MultiXactOffsets),
1481 412 : MetricsUpdate::Set(0),
1482 412 : ));
1483 412 : }
1484 :
1485 424 : Ok(())
1486 424 : }
1487 :
1488 : #[cfg(test)]
1489 420 : pub fn init_empty_test_timeline(&mut self) -> anyhow::Result<()> {
1490 420 : self.init_empty()?;
1491 420 : self.put_control_file(bytes::Bytes::from_static(
1492 420 : b"control_file contents do not matter",
1493 420 : ))
1494 420 : .context("put_control_file")?;
1495 420 : self.put_checkpoint(bytes::Bytes::from_static(
1496 420 : b"checkpoint_file contents do not matter",
1497 420 : ))
1498 420 : .context("put_checkpoint_file")?;
1499 420 : Ok(())
1500 420 : }
1501 :
1502 : /// Creates a relation if it is not already present.
1503 : /// Returns the current size of the relation
1504 836112 : pub(crate) async fn create_relation_if_required(
1505 836112 : &mut self,
1506 836112 : rel: RelTag,
1507 836112 : ctx: &RequestContext,
1508 836112 : ) -> Result<u32, PageReconstructError> {
1509 : // Get current size and put rel creation if rel doesn't exist
1510 : //
1511 : // NOTE: we check the cache first even though get_rel_exists and get_rel_size would
1512 : // check the cache too. This is because eagerly checking the cache results in
1513 : // less work overall and 10% better performance. It's more work on a cache
1514 : // miss, but cache misses are rare.
1515 836112 : if let Some(nblocks) = self.tline.get_cached_rel_size(&rel, self.get_lsn()) {
1516 836092 : Ok(nblocks)
1517 20 : } else if !self
1518 20 : .tline
1519 20 : .get_rel_exists(rel, Version::Modified(self), ctx)
1520 20 : .await?
1521 : {
1522 : // create it with 0 size initially, the logic below will extend it
1523 20 : self.put_rel_creation(rel, 0, ctx)
1524 20 : .await
1525 20 : .context("Relation Error")?;
1526 20 : Ok(0)
1527 : } else {
1528 0 : self.tline
1529 0 : .get_rel_size(rel, Version::Modified(self), ctx)
1530 0 : .await
1531 : }
1532 836112 : }
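     :
     : // Editorial note: the hit counts in this listing illustrate the NOTE above: of
     : // 836112 calls, 836092 were answered from the relation size cache and only 20
     : // fell through to get_rel_exists()/put_rel_creation().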
1533 :
1534 : /// Given a block number for a relation (which represents a newly written block),
1535 : /// the previous block count of the relation, and the shard info, find the gaps
1536 : /// that were created by the newly written block, if any.
1537 291340 : fn find_gaps(
1538 291340 : rel: RelTag,
1539 291340 : blkno: u32,
1540 291340 : previous_nblocks: u32,
1541 291340 : shard: &ShardIdentity,
1542 291340 : ) -> Option<KeySpace> {
1543 291340 : let mut key = rel_block_to_key(rel, blkno);
1544 291340 : let mut gap_accum = None;
1545 :
1546 291340 : for gap_blkno in previous_nblocks..blkno {
1547 64 : key.field6 = gap_blkno;
1548 64 :
1549 64 : if shard.get_shard_number(&key) != shard.number {
1550 16 : continue;
1551 48 : }
1552 48 :
1553 48 : gap_accum
1554 48 : .get_or_insert_with(KeySpaceAccum::new)
1555 48 : .add_key(key);
1556 : }
1557 :
1558 291340 : gap_accum.map(|accum| accum.to_keyspace())
1559 291340 : }
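     :
     : // Worked example (editorial), consistent with the `gap_finding` test at the
     : // bottom of this file: with an unsharded identity, previous_nblocks = 2 and a
     : // write to blkno = 5 leaves gap blocks 2, 3 and 4, i.e. one contiguous range:
     : //
     : //     let shard = ShardIdentity::unsharded();
     : //     let gaps = Self::find_gaps(rel, 5, 2, &shard).unwrap();
     : //     assert_eq!(
     : //         gaps,
     : //         KeySpace::single(rel_block_to_key(rel, 2)..rel_block_to_key(rel, 5))
     : //     );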
1560 :
1561 291704 : pub async fn ingest_batch(
1562 291704 : &mut self,
1563 291704 : mut batch: SerializedValueBatch,
1564 291704 : // TODO(vlad): remove this argument and replace the shard check with is_key_local
1565 291704 : shard: &ShardIdentity,
1566 291704 : ctx: &RequestContext,
1567 291704 : ) -> anyhow::Result<()> {
1568 291704 : let mut gaps_at_lsns = Vec::default();
1569 :
1570 291704 : for meta in batch.metadata.iter() {
1571 291284 : let (rel, blkno) = Key::from_compact(meta.key()).to_rel_block()?;
1572 291284 : let new_nblocks = blkno + 1;
1573 :
1574 291284 : let old_nblocks = self.create_relation_if_required(rel, ctx).await?;
1575 291284 : if new_nblocks > old_nblocks {
1576 4780 : self.put_rel_extend(rel, new_nblocks, ctx).await?;
1577 286504 : }
1578 :
1579 291284 : if let Some(gaps) = Self::find_gaps(rel, blkno, old_nblocks, shard) {
1580 0 : gaps_at_lsns.push((gaps, meta.lsn()));
1581 291284 : }
1582 : }
1583 :
1584 291704 : if !gaps_at_lsns.is_empty() {
1585 0 : batch.zero_gaps(gaps_at_lsns);
1586 291704 : }
1587 :
1588 291704 : match self.pending_data_batch.as_mut() {
1589 40 : Some(pending_batch) => {
1590 40 : pending_batch.extend(batch);
1591 40 : }
1592 291664 : None if batch.has_data() => {
1593 291260 : self.pending_data_batch = Some(batch);
1594 291260 : }
1595 404 : None => {
1596 404 : // Nothing to initialize the batch with
1597 404 : }
1598 : }
1599 :
1600 291704 : Ok(())
1601 291704 : }
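     :
     : // Call-shape sketch (editorial): the WAL ingest path hands each decoded batch
     : // to this method, then commits once the record group is fully applied:
     : //
     : //     modification.ingest_batch(batch, &shard_identity, &ctx).await?;
     : //     modification.commit(&ctx).await?; // stamps the values and advances LSNs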
1602 :
1603 : /// Put a new page version that can be constructed from a WAL record
1604 : ///
1605 : /// NOTE: this will *not* implicitly extend the relation, if the page is beyond the
1606 : /// current end-of-file. It's up to the caller to check that the relation size
1607 : /// matches the blocks inserted!
1608 24 : pub fn put_rel_wal_record(
1609 24 : &mut self,
1610 24 : rel: RelTag,
1611 24 : blknum: BlockNumber,
1612 24 : rec: NeonWalRecord,
1613 24 : ) -> anyhow::Result<()> {
1614 24 : anyhow::ensure!(rel.relnode != 0, RelationError::InvalidRelnode);
1615 24 : self.put(rel_block_to_key(rel, blknum), Value::WalRecord(rec));
1616 24 : Ok(())
1617 24 : }
1618 :
1619 : // Same, but for an SLRU.
1620 16 : pub fn put_slru_wal_record(
1621 16 : &mut self,
1622 16 : kind: SlruKind,
1623 16 : segno: u32,
1624 16 : blknum: BlockNumber,
1625 16 : rec: NeonWalRecord,
1626 16 : ) -> anyhow::Result<()> {
1627 16 : if !self.tline.tenant_shard_id.is_shard_zero() {
1628 0 : return Ok(());
1629 16 : }
1630 16 :
1631 16 : self.put(
1632 16 : slru_block_to_key(kind, segno, blknum),
1633 16 : Value::WalRecord(rec),
1634 16 : );
1635 16 : Ok(())
1636 16 : }
1637 :
1638 : /// Like put_wal_record, but with ready-made image of the page.
1639 555684 : pub fn put_rel_page_image(
1640 555684 : &mut self,
1641 555684 : rel: RelTag,
1642 555684 : blknum: BlockNumber,
1643 555684 : img: Bytes,
1644 555684 : ) -> anyhow::Result<()> {
1645 555684 : anyhow::ensure!(rel.relnode != 0, RelationError::InvalidRelnode);
1646 555684 : let key = rel_block_to_key(rel, blknum);
1647 555684 : if !key.is_valid_key_on_write_path() {
1648 0 : anyhow::bail!(
1649 0 : "the request contains data not supported by pageserver at {}",
1650 0 : key
1651 0 : );
1652 555684 : }
1653 555684 : self.put(rel_block_to_key(rel, blknum), Value::Image(img));
1654 555684 : Ok(())
1655 555684 : }
1656 :
1657 12 : pub fn put_slru_page_image(
1658 12 : &mut self,
1659 12 : kind: SlruKind,
1660 12 : segno: u32,
1661 12 : blknum: BlockNumber,
1662 12 : img: Bytes,
1663 12 : ) -> anyhow::Result<()> {
1664 12 : assert!(self.tline.tenant_shard_id.is_shard_zero());
1665 :
1666 12 : let key = slru_block_to_key(kind, segno, blknum);
1667 12 : if !key.is_valid_key_on_write_path() {
1668 0 : anyhow::bail!(
1669 0 : "the request contains data not supported by pageserver at {}",
1670 0 : key
1671 0 : );
1672 12 : }
1673 12 : self.put(key, Value::Image(img));
1674 12 : Ok(())
1675 12 : }
1676 :
1677 5996 : pub(crate) fn put_rel_page_image_zero(
1678 5996 : &mut self,
1679 5996 : rel: RelTag,
1680 5996 : blknum: BlockNumber,
1681 5996 : ) -> anyhow::Result<()> {
1682 5996 : anyhow::ensure!(rel.relnode != 0, RelationError::InvalidRelnode);
1683 5996 : let key = rel_block_to_key(rel, blknum);
1684 5996 : if !key.is_valid_key_on_write_path() {
1685 0 : anyhow::bail!(
1686 0 : "the request contains data not supported by pageserver: {} @ {}",
1687 0 : key,
1688 0 : self.lsn
1689 0 : );
1690 5996 : }
1691 5996 :
1692 5996 : let batch = self
1693 5996 : .pending_data_batch
1694 5996 : .get_or_insert_with(SerializedValueBatch::default);
1695 5996 :
1696 5996 : batch.put(key.to_compact(), Value::Image(ZERO_PAGE.clone()), self.lsn);
1697 5996 :
1698 5996 : Ok(())
1699 5996 : }
1700 :
1701 0 : pub(crate) fn put_slru_page_image_zero(
1702 0 : &mut self,
1703 0 : kind: SlruKind,
1704 0 : segno: u32,
1705 0 : blknum: BlockNumber,
1706 0 : ) -> anyhow::Result<()> {
1707 0 : assert!(self.tline.tenant_shard_id.is_shard_zero());
1708 0 : let key = slru_block_to_key(kind, segno, blknum);
1709 0 : if !key.is_valid_key_on_write_path() {
1710 0 : anyhow::bail!(
1711 0 : "the request contains data not supported by pageserver: {} @ {}",
1712 0 : key,
1713 0 : self.lsn
1714 0 : );
1715 0 : }
1716 0 :
1717 0 : let batch = self
1718 0 : .pending_data_batch
1719 0 : .get_or_insert_with(SerializedValueBatch::default);
1720 0 :
1721 0 : batch.put(key.to_compact(), Value::Image(ZERO_PAGE.clone()), self.lsn);
1722 0 :
1723 0 : Ok(())
1724 0 : }
1725 :
1726 : /// Returns `true` if the rel_size_v2 write path is enabled. The first time it
1727 : /// is enabled, the new status is also persisted in `index_part.json`.
1728 3892 : pub fn maybe_enable_rel_size_v2(&mut self) -> anyhow::Result<bool> {
1729 3892 : let status = self.tline.get_rel_size_v2_status();
1730 3892 : let config = self.tline.get_rel_size_v2_enabled();
1731 3892 : match (config, status) {
1732 : (false, RelSizeMigration::Legacy) => {
1733 : // tenant config didn't enable it and we didn't write any reldir_v2 key yet
1734 3892 : Ok(false)
1735 : }
1736 : (false, RelSizeMigration::Migrating | RelSizeMigration::Migrated) => {
1737 : // index_part already persisted that the timeline has enabled rel_size_v2
1738 0 : Ok(true)
1739 : }
1740 : (true, RelSizeMigration::Legacy) => {
1741 : // The first time we enable it, we need to persist it in `index_part.json`
1742 0 : self.tline
1743 0 : .update_rel_size_v2_status(RelSizeMigration::Migrating)?;
1744 0 : tracing::info!("enabled rel_size_v2");
1745 0 : Ok(true)
1746 : }
1747 : (true, RelSizeMigration::Migrating | RelSizeMigration::Migrated) => {
1748 : // index_part already persisted that the timeline has enabled rel_size_v2
1749 : // and we don't need to do anything
1750 0 : Ok(true)
1751 : }
1752 : }
1753 3892 : }
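     :
     : // Decision table (editorial summary of the match above):
     : //
     : //   tenant config | persisted status     | result
     : //   --------------+----------------------+---------------------------------
     : //   false         | Legacy               | Ok(false)
     : //   false         | Migrating / Migrated | Ok(true)  (already persisted)
     : //   true          | Legacy               | persist Migrating, then Ok(true)
     : //   true          | Migrating / Migrated | Ok(true)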
1754 :
1755 : /// Store a relmapper file (pg_filenode.map) in the repository
1756 32 : pub async fn put_relmap_file(
1757 32 : &mut self,
1758 32 : spcnode: Oid,
1759 32 : dbnode: Oid,
1760 32 : img: Bytes,
1761 32 : ctx: &RequestContext,
1762 32 : ) -> anyhow::Result<()> {
1763 32 : let v2_enabled = self.maybe_enable_rel_size_v2()?;
1764 :
1765 : // Add it to the directory (if it doesn't exist already)
1766 32 : let buf = self.get(DBDIR_KEY, ctx).await?;
1767 32 : let mut dbdir = DbDirectory::des(&buf)?;
1768 :
1769 32 : let r = dbdir.dbdirs.insert((spcnode, dbnode), true);
1770 32 : if r.is_none() || r == Some(false) {
1771 : // The dbdir entry didn't exist, or it contained a
1772 : // 'false'. The 'insert' call already updated it with
1773 : // 'true', now write the updated 'dbdirs' map back.
1774 32 : let buf = DbDirectory::ser(&dbdir)?;
1775 32 : self.put(DBDIR_KEY, Value::Image(buf.into()));
1776 0 : }
1777 32 : if r.is_none() {
1778 : // Create RelDirectory
1779 : // TODO: if we have fully migrated to v2, no need to create this directory
1780 16 : let buf = RelDirectory::ser(&RelDirectory {
1781 16 : rels: HashSet::new(),
1782 16 : })?;
1783 16 : self.pending_directory_entries
1784 16 : .push((DirectoryKind::Rel, MetricsUpdate::Set(0)));
1785 16 : if v2_enabled {
1786 0 : self.pending_directory_entries
1787 0 : .push((DirectoryKind::RelV2, MetricsUpdate::Set(0)));
1788 16 : }
1789 16 : self.put(
1790 16 : rel_dir_to_key(spcnode, dbnode),
1791 16 : Value::Image(Bytes::from(buf)),
1792 16 : );
1793 16 : }
1794 :
1795 32 : self.put(relmap_file_key(spcnode, dbnode), Value::Image(img));
1796 32 : Ok(())
1797 32 : }
1798 :
1799 0 : pub async fn put_twophase_file(
1800 0 : &mut self,
1801 0 : xid: u64,
1802 0 : img: Bytes,
1803 0 : ctx: &RequestContext,
1804 0 : ) -> anyhow::Result<()> {
1805 : // Add it to the directory entry
1806 0 : let dirbuf = self.get(TWOPHASEDIR_KEY, ctx).await?;
1807 0 : let newdirbuf = if self.tline.pg_version >= 17 {
1808 0 : let mut dir = TwoPhaseDirectoryV17::des(&dirbuf)?;
1809 0 : if !dir.xids.insert(xid) {
1810 0 : anyhow::bail!("twophase file for xid {} already exists", xid);
1811 0 : }
1812 0 : self.pending_directory_entries.push((
1813 0 : DirectoryKind::TwoPhase,
1814 0 : MetricsUpdate::Set(dir.xids.len() as u64),
1815 0 : ));
1816 0 : Bytes::from(TwoPhaseDirectoryV17::ser(&dir)?)
1817 : } else {
1818 0 : let xid = xid as u32;
1819 0 : let mut dir = TwoPhaseDirectory::des(&dirbuf)?;
1820 0 : if !dir.xids.insert(xid) {
1821 0 : anyhow::bail!("twophase file for xid {} already exists", xid);
1822 0 : }
1823 0 : self.pending_directory_entries.push((
1824 0 : DirectoryKind::TwoPhase,
1825 0 : MetricsUpdate::Set(dir.xids.len() as u64),
1826 0 : ));
1827 0 : Bytes::from(TwoPhaseDirectory::ser(&dir)?)
1828 : };
1829 0 : self.put(TWOPHASEDIR_KEY, Value::Image(newdirbuf));
1830 0 :
1831 0 : self.put(twophase_file_key(xid), Value::Image(img));
1832 0 : Ok(())
1833 0 : }
1834 :
1835 0 : pub async fn set_replorigin(
1836 0 : &mut self,
1837 0 : origin_id: RepOriginId,
1838 0 : origin_lsn: Lsn,
1839 0 : ) -> anyhow::Result<()> {
1840 0 : let key = repl_origin_key(origin_id);
1841 0 : self.put(key, Value::Image(origin_lsn.ser().unwrap().into()));
1842 0 : Ok(())
1843 0 : }
1844 :
1845 0 : pub async fn drop_replorigin(&mut self, origin_id: RepOriginId) -> anyhow::Result<()> {
1846 0 : self.set_replorigin(origin_id, Lsn::INVALID).await
1847 0 : }
1848 :
1849 424 : pub fn put_control_file(&mut self, img: Bytes) -> anyhow::Result<()> {
1850 424 : self.put(CONTROLFILE_KEY, Value::Image(img));
1851 424 : Ok(())
1852 424 : }
1853 :
1854 452 : pub fn put_checkpoint(&mut self, img: Bytes) -> anyhow::Result<()> {
1855 452 : self.put(CHECKPOINT_KEY, Value::Image(img));
1856 452 : Ok(())
1857 452 : }
1858 :
1859 0 : pub async fn drop_dbdir(
1860 0 : &mut self,
1861 0 : spcnode: Oid,
1862 0 : dbnode: Oid,
1863 0 : ctx: &RequestContext,
1864 0 : ) -> anyhow::Result<()> {
1865 0 : let total_blocks = self
1866 0 : .tline
1867 0 : .get_db_size(spcnode, dbnode, Version::Modified(self), ctx)
1868 0 : .await?;
1869 :
1870 : // Remove entry from dbdir
1871 0 : let buf = self.get(DBDIR_KEY, ctx).await?;
1872 0 : let mut dir = DbDirectory::des(&buf)?;
1873 0 : if dir.dbdirs.remove(&(spcnode, dbnode)).is_some() {
1874 0 : let buf = DbDirectory::ser(&dir)?;
1875 0 : self.pending_directory_entries.push((
1876 0 : DirectoryKind::Db,
1877 0 : MetricsUpdate::Set(dir.dbdirs.len() as u64),
1878 0 : ));
1879 0 : self.put(DBDIR_KEY, Value::Image(buf.into()));
1880 : } else {
1881 0 : warn!(
1882 0 : "dropped dbdir for spcnode {} dbnode {} did not exist in db directory",
1883 : spcnode, dbnode
1884 : );
1885 : }
1886 :
1887 : // Update logical database size.
1888 0 : self.pending_nblocks -= total_blocks as i64;
1889 0 :
1890 0 : // Delete all relations and metadata files for the spcnode/dnode
1891 0 : self.delete(dbdir_key_range(spcnode, dbnode));
1892 0 : Ok(())
1893 0 : }
1894 :
1895 : /// Create a relation fork.
1896 : ///
1897 : /// 'nblocks' is the initial size.
1898 3840 : pub async fn put_rel_creation(
1899 3840 : &mut self,
1900 3840 : rel: RelTag,
1901 3840 : nblocks: BlockNumber,
1902 3840 : ctx: &RequestContext,
1903 3840 : ) -> Result<(), RelationError> {
1904 3840 : if rel.relnode == 0 {
1905 0 : return Err(RelationError::InvalidRelnode);
1906 3840 : }
1907 : // It's possible that this is the first rel for this db in this
1908 : // tablespace. Create the reldir entry for it if so.
1909 3840 : let mut dbdir = DbDirectory::des(&self.get(DBDIR_KEY, ctx).await.context("read db")?)
1910 3840 : .context("deserialize db")?;
1911 :
1912 3840 : let dbdir_exists =
1913 3840 : if let hash_map::Entry::Vacant(e) = dbdir.dbdirs.entry((rel.spcnode, rel.dbnode)) {
1914 : // Didn't exist. Update dbdir
1915 16 : e.insert(false);
1916 16 : let buf = DbDirectory::ser(&dbdir).context("serialize db")?;
1917 16 : self.pending_directory_entries.push((
1918 16 : DirectoryKind::Db,
1919 16 : MetricsUpdate::Set(dbdir.dbdirs.len() as u64),
1920 16 : ));
1921 16 : self.put(DBDIR_KEY, Value::Image(buf.into()));
1922 16 : false
1923 : } else {
1924 3824 : true
1925 : };
1926 :
1927 3840 : let rel_dir_key = rel_dir_to_key(rel.spcnode, rel.dbnode);
1928 3840 : let mut rel_dir = if !dbdir_exists {
1929 : // Create the RelDirectory
1930 16 : RelDirectory::default()
1931 : } else {
1932 : // reldir already exists, fetch it
1933 3824 : RelDirectory::des(&self.get(rel_dir_key, ctx).await.context("read db")?)
1934 3824 : .context("deserialize db")?
1935 : };
1936 :
1937 : // Add the new relation to the rel directory entry, and write it back
1938 3840 : if !rel_dir.rels.insert((rel.relnode, rel.forknum)) {
1939 0 : return Err(RelationError::AlreadyExists);
1940 3840 : }
1941 :
1942 3840 : let v2_enabled = self.maybe_enable_rel_size_v2()?;
1943 :
1944 3840 : if v2_enabled {
1945 0 : let sparse_rel_dir_key =
1946 0 : rel_tag_sparse_key(rel.spcnode, rel.dbnode, rel.relnode, rel.forknum);
1947 : // check if the rel_dir_key exists in v2
1948 0 : let val = self
1949 0 : .sparse_get(sparse_rel_dir_key, ctx)
1950 0 : .await
1951 0 : .map_err(|e| RelationError::Other(e.into()))?;
1952 0 : let val = RelDirExists::decode_option(val)
1953 0 : .map_err(|_| RelationError::Other(anyhow::anyhow!("invalid reldir key")))?;
1954 0 : if val == RelDirExists::Exists {
1955 0 : return Err(RelationError::AlreadyExists);
1956 0 : }
1957 0 : self.put(
1958 0 : sparse_rel_dir_key,
1959 0 : Value::Image(RelDirExists::Exists.encode()),
1960 0 : );
1961 0 : if !dbdir_exists {
1962 0 : self.pending_directory_entries
1963 0 : .push((DirectoryKind::Rel, MetricsUpdate::Set(0)));
1964 0 : self.pending_directory_entries
1965 0 : .push((DirectoryKind::RelV2, MetricsUpdate::Set(0)));
1966 0 : // We don't write `rel_dir_key -> rel_dir.rels` back to the storage in the v2 path unless it's the initial creation.
1967 0 : // TODO: if we have fully migrated to v2, no need to create this directory. Otherwise, there
1968 0 : // will be key not found errors if we don't create an empty one for rel_size_v2.
1969 0 : self.put(
1970 0 : rel_dir_key,
1971 0 : Value::Image(Bytes::from(
1972 0 : RelDirectory::ser(&RelDirectory::default()).context("serialize")?,
1973 : )),
1974 : );
1975 0 : }
1976 0 : self.pending_directory_entries
1977 0 : .push((DirectoryKind::RelV2, MetricsUpdate::Add(1)));
1978 : } else {
1979 3840 : if !dbdir_exists {
1980 16 : self.pending_directory_entries
1981 16 : .push((DirectoryKind::Rel, MetricsUpdate::Set(0)))
1982 3824 : }
1983 3840 : self.pending_directory_entries
1984 3840 : .push((DirectoryKind::Rel, MetricsUpdate::Add(1)));
1985 3840 : self.put(
1986 3840 : rel_dir_key,
1987 3840 : Value::Image(Bytes::from(
1988 3840 : RelDirectory::ser(&rel_dir).context("serialize")?,
1989 : )),
1990 : );
1991 : }
1992 : // Put size
1993 3840 : let size_key = rel_size_to_key(rel);
1994 3840 : let buf = nblocks.to_le_bytes();
1995 3840 : self.put(size_key, Value::Image(Bytes::from(buf.to_vec())));
1996 3840 :
1997 3840 : self.pending_nblocks += nblocks as i64;
1998 3840 :
1999 3840 : // Update relation size cache
2000 3840 : self.tline.set_cached_rel_size(rel, self.lsn, nblocks);
2001 3840 :
2002 3840 : // Even if nblocks > 0, we don't insert any actual blocks here. That's up to the
2003 3840 : // caller.
2004 3840 : Ok(())
2005 3840 : }
2006 :
2007 : /// Truncate relation
2008 12024 : pub async fn put_rel_truncation(
2009 12024 : &mut self,
2010 12024 : rel: RelTag,
2011 12024 : nblocks: BlockNumber,
2012 12024 : ctx: &RequestContext,
2013 12024 : ) -> anyhow::Result<()> {
2014 12024 : anyhow::ensure!(rel.relnode != 0, RelationError::InvalidRelnode);
2015 12024 : if self
2016 12024 : .tline
2017 12024 : .get_rel_exists(rel, Version::Modified(self), ctx)
2018 12024 : .await?
2019 : {
2020 12024 : let size_key = rel_size_to_key(rel);
2021 : // Fetch the old size first
2022 12024 : let old_size = self.get(size_key, ctx).await?.get_u32_le();
2023 12024 :
2024 12024 : // Update the entry with the new size.
2025 12024 : let buf = nblocks.to_le_bytes();
2026 12024 : self.put(size_key, Value::Image(Bytes::from(buf.to_vec())));
2027 12024 :
2028 12024 : // Update relation size cache
2029 12024 : self.tline.set_cached_rel_size(rel, self.lsn, nblocks);
2030 12024 :
2031 12024 : // Update logical database size.
2032 12024 : self.pending_nblocks -= old_size as i64 - nblocks as i64;
2033 0 : }
2034 12024 : Ok(())
2035 12024 : }
2036 :
2037 : /// Extend relation
2038 : /// If new size is smaller, do nothing.
2039 553360 : pub async fn put_rel_extend(
2040 553360 : &mut self,
2041 553360 : rel: RelTag,
2042 553360 : nblocks: BlockNumber,
2043 553360 : ctx: &RequestContext,
2044 553360 : ) -> anyhow::Result<()> {
2045 553360 : anyhow::ensure!(rel.relnode != 0, RelationError::InvalidRelnode);
2046 :
2047 : // Put size
2048 553360 : let size_key = rel_size_to_key(rel);
2049 553360 : let old_size = self.get(size_key, ctx).await?.get_u32_le();
2050 553360 :
2051 553360 : // only extend relation here. never decrease the size
2052 553360 : if nblocks > old_size {
2053 549576 : let buf = nblocks.to_le_bytes();
2054 549576 : self.put(size_key, Value::Image(Bytes::from(buf.to_vec())));
2055 549576 :
2056 549576 : // Update relation size cache
2057 549576 : self.tline.set_cached_rel_size(rel, self.lsn, nblocks);
2058 549576 :
2059 549576 : self.pending_nblocks += nblocks as i64 - old_size as i64;
2060 549576 : }
2061 553360 : Ok(())
2062 553360 : }
2063 :
2064 : /// Drop some relations
2065 20 : pub(crate) async fn put_rel_drops(
2066 20 : &mut self,
2067 20 : drop_relations: HashMap<(u32, u32), Vec<RelTag>>,
2068 20 : ctx: &RequestContext,
2069 20 : ) -> anyhow::Result<()> {
2070 20 : let v2_enabled = self.maybe_enable_rel_size_v2()?;
2071 24 : for ((spc_node, db_node), rel_tags) in drop_relations {
2072 4 : let dir_key = rel_dir_to_key(spc_node, db_node);
2073 4 : let buf = self.get(dir_key, ctx).await?;
2074 4 : let mut dir = RelDirectory::des(&buf)?;
2075 :
2076 4 : let mut dirty = false;
2077 8 : for rel_tag in rel_tags {
2078 4 : let found = if dir.rels.remove(&(rel_tag.relnode, rel_tag.forknum)) {
2079 4 : self.pending_directory_entries
2080 4 : .push((DirectoryKind::Rel, MetricsUpdate::Sub(1)));
2081 4 : dirty = true;
2082 4 : true
2083 0 : } else if v2_enabled {
2084 : // The rel is not found in the old reldir key, so we need to check the new sparse keyspace.
2085 : // Note that a relation can only exist in one of the two keyspaces (guaranteed by the ingestion
2086 : // logic).
2087 0 : let key =
2088 0 : rel_tag_sparse_key(spc_node, db_node, rel_tag.relnode, rel_tag.forknum);
2089 0 : let val = RelDirExists::decode_option(self.sparse_get(key, ctx).await?)
2090 0 : .map_err(|_| RelationError::Other(anyhow::anyhow!("invalid reldir key")))?;
2091 0 : if val == RelDirExists::Exists {
2092 0 : self.pending_directory_entries
2093 0 : .push((DirectoryKind::RelV2, MetricsUpdate::Sub(1)));
2094 0 : // put tombstone
2095 0 : self.put(key, Value::Image(RelDirExists::Removed.encode()));
2096 0 : // no need to set dirty to true
2097 0 : true
2098 : } else {
2099 0 : false
2100 : }
2101 : } else {
2102 0 : false
2103 : };
2104 :
2105 4 : if found {
2106 : // update logical size
2107 4 : let size_key = rel_size_to_key(rel_tag);
2108 4 : let old_size = self.get(size_key, ctx).await?.get_u32_le();
2109 4 : self.pending_nblocks -= old_size as i64;
2110 4 :
2111 4 : // Remove entry from relation size cache
2112 4 : self.tline.remove_cached_rel_size(&rel_tag);
2113 4 :
2114 4 : // Delete size entry, as well as all blocks
2115 4 : self.delete(rel_key_range(rel_tag));
2116 0 : }
2117 : }
2118 :
2119 4 : if dirty {
2120 4 : self.put(dir_key, Value::Image(Bytes::from(RelDirectory::ser(&dir)?)));
2121 0 : }
2122 : }
2123 :
2124 20 : Ok(())
2125 20 : }
2126 :
2127 12 : pub async fn put_slru_segment_creation(
2128 12 : &mut self,
2129 12 : kind: SlruKind,
2130 12 : segno: u32,
2131 12 : nblocks: BlockNumber,
2132 12 : ctx: &RequestContext,
2133 12 : ) -> anyhow::Result<()> {
2134 12 : assert!(self.tline.tenant_shard_id.is_shard_zero());
2135 :
2136 : // Add it to the directory entry
2137 12 : let dir_key = slru_dir_to_key(kind);
2138 12 : let buf = self.get(dir_key, ctx).await?;
2139 12 : let mut dir = SlruSegmentDirectory::des(&buf)?;
2140 :
2141 12 : if !dir.segments.insert(segno) {
2142 0 : anyhow::bail!("slru segment {kind:?}/{segno} already exists");
2143 12 : }
2144 12 : self.pending_directory_entries.push((
2145 12 : DirectoryKind::SlruSegment(kind),
2146 12 : MetricsUpdate::Set(dir.segments.len() as u64),
2147 12 : ));
2148 12 : self.put(
2149 12 : dir_key,
2150 12 : Value::Image(Bytes::from(SlruSegmentDirectory::ser(&dir)?)),
2151 : );
2152 :
2153 : // Put size
2154 12 : let size_key = slru_segment_size_to_key(kind, segno);
2155 12 : let buf = nblocks.to_le_bytes();
2156 12 : self.put(size_key, Value::Image(Bytes::from(buf.to_vec())));
2157 12 :
2158 12 : // even if nblocks > 0, we don't insert any actual blocks here
2159 12 :
2160 12 : Ok(())
2161 12 : }
2162 :
2163 : /// Extend SLRU segment
2164 0 : pub fn put_slru_extend(
2165 0 : &mut self,
2166 0 : kind: SlruKind,
2167 0 : segno: u32,
2168 0 : nblocks: BlockNumber,
2169 0 : ) -> anyhow::Result<()> {
2170 0 : assert!(self.tline.tenant_shard_id.is_shard_zero());
2171 :
2172 : // Put size
2173 0 : let size_key = slru_segment_size_to_key(kind, segno);
2174 0 : let buf = nblocks.to_le_bytes();
2175 0 : self.put(size_key, Value::Image(Bytes::from(buf.to_vec())));
2176 0 : Ok(())
2177 0 : }
2178 :
2179 : /// This method is used for marking truncated SLRU files
2180 0 : pub async fn drop_slru_segment(
2181 0 : &mut self,
2182 0 : kind: SlruKind,
2183 0 : segno: u32,
2184 0 : ctx: &RequestContext,
2185 0 : ) -> anyhow::Result<()> {
2186 0 : // Remove it from the directory entry
2187 0 : let dir_key = slru_dir_to_key(kind);
2188 0 : let buf = self.get(dir_key, ctx).await?;
2189 0 : let mut dir = SlruSegmentDirectory::des(&buf)?;
2190 :
2191 0 : if !dir.segments.remove(&segno) {
2192 0 : warn!("slru segment {:?}/{} does not exist", kind, segno);
2193 0 : }
2194 0 : self.pending_directory_entries.push((
2195 0 : DirectoryKind::SlruSegment(kind),
2196 0 : MetricsUpdate::Set(dir.segments.len() as u64),
2197 0 : ));
2198 0 : self.put(
2199 0 : dir_key,
2200 0 : Value::Image(Bytes::from(SlruSegmentDirectory::ser(&dir)?)),
2201 : );
2202 :
2203 : // Delete size entry, as well as all blocks
2204 0 : self.delete(slru_segment_key_range(kind, segno));
2205 0 :
2206 0 : Ok(())
2207 0 : }
2208 :
2209 : /// Drop a relmapper file (pg_filenode.map)
2210 0 : pub fn drop_relmap_file(&mut self, _spcnode: Oid, _dbnode: Oid) -> anyhow::Result<()> {
2211 0 : // TODO
2212 0 : Ok(())
2213 0 : }
2214 :
2215 : /// This method is used for removing twophase files of finished prepared transactions
2216 0 : pub async fn drop_twophase_file(
2217 0 : &mut self,
2218 0 : xid: u64,
2219 0 : ctx: &RequestContext,
2220 0 : ) -> anyhow::Result<()> {
2221 : // Remove it from the directory entry
2222 0 : let buf = self.get(TWOPHASEDIR_KEY, ctx).await?;
2223 0 : let newdirbuf = if self.tline.pg_version >= 17 {
2224 0 : let mut dir = TwoPhaseDirectoryV17::des(&buf)?;
2225 :
2226 0 : if !dir.xids.remove(&xid) {
2227 0 : warn!("twophase file for xid {} does not exist", xid);
2228 0 : }
2229 0 : self.pending_directory_entries.push((
2230 0 : DirectoryKind::TwoPhase,
2231 0 : MetricsUpdate::Set(dir.xids.len() as u64),
2232 0 : ));
2233 0 : Bytes::from(TwoPhaseDirectoryV17::ser(&dir)?)
2234 : } else {
2235 0 : let xid: u32 = u32::try_from(xid)?;
2236 0 : let mut dir = TwoPhaseDirectory::des(&buf)?;
2237 :
2238 0 : if !dir.xids.remove(&xid) {
2239 0 : warn!("twophase file for xid {} does not exist", xid);
2240 0 : }
2241 0 : self.pending_directory_entries.push((
2242 0 : DirectoryKind::TwoPhase,
2243 0 : MetricsUpdate::Set(dir.xids.len() as u64),
2244 0 : ));
2245 0 : Bytes::from(TwoPhaseDirectory::ser(&dir)?)
2246 : };
2247 0 : self.put(TWOPHASEDIR_KEY, Value::Image(newdirbuf));
2248 0 :
2249 0 : // Delete it
2250 0 : self.delete(twophase_key_range(xid));
2251 0 :
2252 0 : Ok(())
2253 0 : }
2254 :
2255 32 : pub async fn put_file(
2256 32 : &mut self,
2257 32 : path: &str,
2258 32 : content: &[u8],
2259 32 : ctx: &RequestContext,
2260 32 : ) -> anyhow::Result<()> {
2261 32 : let key = aux_file::encode_aux_file_key(path);
2262 : // retrieve the key from the engine
2263 32 : let old_val = match self.get(key, ctx).await {
2264 8 : Ok(val) => Some(val),
2265 24 : Err(PageReconstructError::MissingKey(_)) => None,
2266 0 : Err(e) => return Err(e.into()),
2267 : };
2268 32 : let files: Vec<(&str, &[u8])> = if let Some(ref old_val) = old_val {
2269 8 : aux_file::decode_file_value(old_val)?
2270 : } else {
2271 24 : Vec::new()
2272 : };
2273 32 : let mut other_files = Vec::with_capacity(files.len());
2274 32 : let mut modifying_file = None;
2275 40 : for file @ (p, content) in files {
2276 8 : if path == p {
2277 8 : assert!(
2278 8 : modifying_file.is_none(),
2279 0 : "duplicated entries found for {}",
2280 : path
2281 : );
2282 8 : modifying_file = Some(content);
2283 0 : } else {
2284 0 : other_files.push(file);
2285 0 : }
2286 : }
2287 32 : let mut new_files = other_files;
2288 32 : match (modifying_file, content.is_empty()) {
2289 4 : (Some(old_content), false) => {
2290 4 : self.tline
2291 4 : .aux_file_size_estimator
2292 4 : .on_update(old_content.len(), content.len());
2293 4 : new_files.push((path, content));
2294 4 : }
2295 4 : (Some(old_content), true) => {
2296 4 : self.tline
2297 4 : .aux_file_size_estimator
2298 4 : .on_remove(old_content.len());
2299 4 : // not adding the file key to the final `new_files` vec.
2300 4 : }
2301 24 : (None, false) => {
2302 24 : self.tline.aux_file_size_estimator.on_add(content.len());
2303 24 : new_files.push((path, content));
2304 24 : }
2305 : // Compute may request deletion of the old version of a pgstat AUX file if the new one exceeds the size limit.
2306 : // Compute doesn't know whether a previous version of this file exists, so an
2307 : // attempt to delete a non-existent file can reach this branch.
2308 : // To avoid false alarms, log it at info rather than warning level.
2309 0 : (None, true) if path.starts_with("pg_stat/") => {
2310 0 : info!("removing non-existing pg_stat file: {}", path)
2311 : }
2312 0 : (None, true) => warn!("removing non-existing aux file: {}", path),
2313 : }
2314 32 : let new_val = aux_file::encode_file_value(&new_files)?;
2315 32 : self.put(key, Value::Image(new_val.into()));
2316 32 :
2317 32 : Ok(())
2318 32 : }
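     :
     : // Usage sketch (editorial), mirroring the `aux_files_round_trip` test below:
     : // a non-empty `content` inserts or replaces the entry for `path`, an empty
     : // one deletes it:
     : //
     : //     modification.put_file("foo/bar1", b"content1", &ctx).await?; // insert
     : //     modification.put_file("foo/bar1", b"content3", &ctx).await?; // replace
     : //     modification.put_file("foo/bar1", b"", &ctx).await?;         // delete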
2319 :
2320 : ///
2321 : /// Flush changes accumulated so far to the underlying repository.
2322 : ///
2323 : /// Usually, changes made in DatadirModification are atomic, but this allows
2324 : /// you to flush them to the underlying repository before the final `commit`.
2325 : /// That allows to free up the memory used to hold the pending changes.
2326 : ///
2327 : /// Currently only used during bulk import of a data directory. In that
2328 : /// context, breaking the atomicity is OK. If the import is interrupted, the
2329 : /// whole import fails and the timeline will be deleted anyway.
2330 : /// (Or to be precise, it will be left behind for debugging purposes and
2331 : /// ignored, see <https://github.com/neondatabase/neon/pull/1809>)
2332 : ///
2333 : /// Note: A consequence of flushing the pending operations is that they
2334 : /// won't be visible to subsequent operations until `commit`. The function
2335 : /// retains all the metadata, but data pages are flushed. That's again OK
2336 : /// for bulk import, where you are just loading data pages and won't try to
2337 : /// modify the same pages twice.
2338 3860 : pub(crate) async fn flush(&mut self, ctx: &RequestContext) -> anyhow::Result<()> {
2339 3860 : // Unless we have accumulated a decent amount of changes, it's not worth it
2340 3860 : // to scan through the pending_updates list.
2341 3860 : let pending_nblocks = self.pending_nblocks;
2342 3860 : if pending_nblocks < 10000 {
2343 3860 : return Ok(());
2344 0 : }
2345 :
2346 0 : let mut writer = self.tline.writer().await;
2347 :
2348 : // Flush relation and SLRU data blocks, keep metadata.
2349 0 : if let Some(batch) = self.pending_data_batch.take() {
2350 0 : tracing::debug!(
2351 0 : "Flushing batch with max_lsn={}. Last record LSN is {}",
2352 0 : batch.max_lsn,
2353 0 : self.tline.get_last_record_lsn()
2354 : );
2355 :
2356 : // This bails out on first error without modifying pending_updates.
2357 : // That's Ok, cf this function's doc comment.
2358 0 : writer.put_batch(batch, ctx).await?;
2359 0 : }
2360 :
2361 0 : if pending_nblocks != 0 {
2362 0 : writer.update_current_logical_size(pending_nblocks * i64::from(BLCKSZ));
2363 0 : self.pending_nblocks = 0;
2364 0 : }
2365 :
2366 0 : for (kind, count) in std::mem::take(&mut self.pending_directory_entries) {
2367 0 : writer.update_directory_entries_count(kind, count);
2368 0 : }
2369 :
2370 0 : Ok(())
2371 3860 : }
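     :
     : // Bulk-import sketch (editorial; `FLUSH_THRESHOLD` is a hypothetical constant,
     : // not defined in this file):
     : //
     : //     if modification.len() >= FLUSH_THRESHOLD {
     : //         modification.flush(&ctx).await?; // bound memory; atomicity is waived
     : //     }
     : //     // ... more puts ...
     : //     modification.commit(&ctx).await?;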
2372 :
2373 : ///
2374 : /// Finish this atomic update, writing all the updated keys to the
2375 : /// underlying timeline.
2376 : /// All the modifications in this atomic update are stamped by the specified LSN.
2377 : ///
2378 1486196 : pub async fn commit(&mut self, ctx: &RequestContext) -> anyhow::Result<()> {
2379 1486196 : let mut writer = self.tline.writer().await;
2380 :
2381 1486196 : let pending_nblocks = self.pending_nblocks;
2382 1486196 : self.pending_nblocks = 0;
2383 :
2384 : // Ordering: the items in this batch do not need to be in any global order, but values for
2385 : // a particular Key must be in Lsn order relative to one another. InMemoryLayer relies on
2386 : // this to do efficient updates to its index. See [`wal_decoder::serialized_batch`] for
2387 : // more details.
2388 :
2389 1486196 : let metadata_batch = {
2390 1486196 : let pending_meta = self
2391 1486196 : .pending_metadata_pages
2392 1486196 : .drain()
2393 1486196 : .flat_map(|(key, values)| {
2394 548092 : values
2395 548092 : .into_iter()
2396 548092 : .map(move |(lsn, value_size, value)| (key, lsn, value_size, value))
2397 1486196 : })
2398 1486196 : .collect::<Vec<_>>();
2399 1486196 :
2400 1486196 : if pending_meta.is_empty() {
2401 944556 : None
2402 : } else {
2403 541640 : Some(SerializedValueBatch::from_values(pending_meta))
2404 : }
2405 : };
2406 :
2407 1486196 : let data_batch = self.pending_data_batch.take();
2408 :
2409 1486196 : let maybe_batch = match (data_batch, metadata_batch) {
2410 529112 : (Some(mut data), Some(metadata)) => {
2411 529112 : data.extend(metadata);
2412 529112 : Some(data)
2413 : }
2414 286524 : (Some(data), None) => Some(data),
2415 12528 : (None, Some(metadata)) => Some(metadata),
2416 658032 : (None, None) => None,
2417 : };
2418 :
2419 1486196 : if let Some(batch) = maybe_batch {
2420 828164 : tracing::debug!(
2421 0 : "Flushing batch with max_lsn={}. Last record LSN is {}",
2422 0 : batch.max_lsn,
2423 0 : self.tline.get_last_record_lsn()
2424 : );
2425 :
2426 : // This bails out on first error without modifying pending_updates.
2427 : // That's Ok, cf this function's doc comment.
2428 828164 : writer.put_batch(batch, ctx).await?;
2429 658032 : }
2430 :
2431 1486196 : if !self.pending_deletions.is_empty() {
2432 4 : writer.delete_batch(&self.pending_deletions, ctx).await?;
2433 4 : self.pending_deletions.clear();
2434 1486192 : }
2435 :
2436 1486196 : self.pending_lsns.push(self.lsn);
2437 1777912 : for pending_lsn in self.pending_lsns.drain(..) {
2438 1777912 : // TODO(vlad): pretty sure the comment below is not valid anymore
2439 1777912 : // and we can call finish write with the latest LSN
2440 1777912 : //
2441 1777912 : // Ideally, we should be able to call writer.finish_write() only once
2442 1777912 : // with the highest LSN. However, the last_record_lsn variable in the
2443 1777912 : // timeline keeps track of the latest LSN and the immediate previous LSN
2444 1777912 : // so we need to record every LSN to not leave a gap between them.
2445 1777912 : writer.finish_write(pending_lsn);
2446 1777912 : }
2447 :
2448 1486196 : if pending_nblocks != 0 {
2449 541140 : writer.update_current_logical_size(pending_nblocks * i64::from(BLCKSZ));
2450 945056 : }
2451 :
2452 1486196 : for (kind, count) in std::mem::take(&mut self.pending_directory_entries) {
2453 5988 : writer.update_directory_entries_count(kind, count);
2454 5988 : }
2455 :
2456 1486196 : self.pending_metadata_bytes = 0;
2457 1486196 :
2458 1486196 : Ok(())
2459 1486196 : }
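     :
     : // Typical lifecycle (editorial), as exercised by the tests at the bottom of
     : // this file:
     : //
     : //     let mut modification = tline.begin_modification(lsn);
     : //     modification.put_rel_page_image(rel, blkno, img)?;
     : //     modification.set_lsn(next_lsn)?; // later records in the same batch
     : //     modification.put_rel_page_image(rel, blkno + 1, img2)?;
     : //     modification.commit(&ctx).await?;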
2460 :
2461 583408 : pub(crate) fn len(&self) -> usize {
2462 583408 : self.pending_metadata_pages.len()
2463 583408 : + self.pending_data_batch.as_ref().map_or(0, |b| b.len())
2464 583408 : + self.pending_deletions.len()
2465 583408 : }
2466 :
2467 : /// Read a page from the Timeline we are writing to. For metadata pages, this passes through
2468 : /// a cache in Self, which makes writes earlier in this modification visible to WAL records later
2469 : /// in the modification.
2470 : ///
2471 : /// For data pages, reads pass directly to the owning Timeline: any ingest code which reads a data
2472 : /// page must ensure that the pages it reads are already committed in the Timeline; for example,
2473 : /// DB create operations are always preceded by a call to commit(). This is special cased because
2474 : /// it's rare: all the 'normal' WAL operations will only read metadata pages such as relation sizes,
2475 : /// and not data pages.
2476 573172 : async fn get(&self, key: Key, ctx: &RequestContext) -> Result<Bytes, PageReconstructError> {
2477 573172 : if !Self::is_data_key(&key) {
2478 : // Have we already updated the same key? Read the latest pending updated
2479 : // version in that case.
2480 : //
2481 : // Note: we don't check pending_deletions. It is an error to request a
2482 : // value that has been removed, deletion only avoids leaking storage.
2483 573172 : if let Some(values) = self.pending_metadata_pages.get(&key.to_compact()) {
2484 31856 : if let Some((_, _, value)) = values.last() {
2485 31856 : return if let Value::Image(img) = value {
2486 31856 : Ok(img.clone())
2487 : } else {
2488 : // Currently, we never need to read back a WAL record that we
2489 : // inserted in the same "transaction". All the metadata updates
2490 : // work directly with Images, and we never need to read actual
2491 : // data pages. We could handle this if we had to, by calling
2492 : // the walredo manager, but let's keep it simple for now.
2493 0 : Err(PageReconstructError::Other(anyhow::anyhow!(
2494 0 : "unexpected pending WAL record"
2495 0 : )))
2496 : };
2497 0 : }
2498 541316 : }
2499 : } else {
2500 : // This is an expensive check, so we only do it in debug mode. If reading a data key,
2501 : // this key should never be present in pending_data_pages. We ensure this by committing
2502 : // modifications before ingesting DB create operations, which are the only kind that reads
2503 : // data pages during ingest.
2504 0 : if cfg!(debug_assertions) {
2505 0 : assert!(
2506 0 : !self
2507 0 : .pending_data_batch
2508 0 : .as_ref()
2509 0 : .is_some_and(|b| b.updates_key(&key))
2510 0 : );
2511 0 : }
2512 : }
2513 :
2514 : // Metadata page cache miss, or we're reading a data page.
2515 541316 : let lsn = Lsn::max(self.tline.get_last_record_lsn(), self.lsn);
2516 541316 : self.tline.get(key, lsn, ctx).await
2517 573172 : }
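     :
     : // Editorial note: a metadata key written earlier in this modification, e.g.
     : // via put(DBDIR_KEY, ...), is served from pending_metadata_pages here before
     : // commit(); data keys always read from the owning Timeline.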
2518 :
2519 : /// Get a key from the sparse keyspace. Automatically converts the missing key error
2520 : /// and the empty value into None.
2521 0 : async fn sparse_get(
2522 0 : &self,
2523 0 : key: Key,
2524 0 : ctx: &RequestContext,
2525 0 : ) -> Result<Option<Bytes>, PageReconstructError> {
2526 0 : let val = self.get(key, ctx).await;
2527 0 : match val {
2528 0 : Ok(val) if val.is_empty() => Ok(None),
2529 0 : Ok(val) => Ok(Some(val)),
2530 0 : Err(PageReconstructError::MissingKey(_)) => Ok(None),
2531 0 : Err(e) => Err(e),
2532 : }
2533 0 : }
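     :
     : // Caller-side sketch (editorial), as in put_rel_creation above: "absent key"
     : // and "empty tombstone" both collapse to None before decoding:
     : //
     : //     let val = RelDirExists::decode_option(self.sparse_get(key, ctx).await?)
     : //         .map_err(|_| RelationError::Other(anyhow::anyhow!("invalid reldir key")))?;
     : //     let exists = val == RelDirExists::Exists;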
2534 :
2535 1128132 : fn put(&mut self, key: Key, val: Value) {
2536 1128132 : if Self::is_data_key(&key) {
2537 555736 : self.put_data(key.to_compact(), val)
2538 : } else {
2539 572396 : self.put_metadata(key.to_compact(), val)
2540 : }
2541 1128132 : }
2542 :
2543 555736 : fn put_data(&mut self, key: CompactKey, val: Value) {
2544 555736 : let batch = self
2545 555736 : .pending_data_batch
2546 555736 : .get_or_insert_with(SerializedValueBatch::default);
2547 555736 : batch.put(key, val, self.lsn);
2548 555736 : }
2549 :
2550 572396 : fn put_metadata(&mut self, key: CompactKey, val: Value) {
2551 572396 : let values = self.pending_metadata_pages.entry(key).or_default();
2552 : // Replace the previous value if it exists at the same lsn
2553 572396 : if let Some((last_lsn, last_value_ser_size, last_value)) = values.last_mut() {
2554 24304 : if *last_lsn == self.lsn {
2555 : // Update the pending_metadata_bytes contribution from this entry, and update the serialized size in place
2556 24304 : self.pending_metadata_bytes -= *last_value_ser_size;
2557 24304 : *last_value_ser_size = val.serialized_size().unwrap() as usize;
2558 24304 : self.pending_metadata_bytes += *last_value_ser_size;
2559 24304 :
2560 24304 : // Use the latest value: this replaces any earlier write to the same (key,lsn), such as may
2561 24304 : // have been generated by synthesized zero page writes prior to the first real write to a page.
2562 24304 : *last_value = val;
2563 24304 : return;
2564 0 : }
2565 548092 : }
2566 :
2567 548092 : let val_serialized_size = val.serialized_size().unwrap() as usize;
2568 548092 : self.pending_metadata_bytes += val_serialized_size;
2569 548092 : values.push((self.lsn, val_serialized_size, val));
2570 548092 :
2571 548092 : if key == CHECKPOINT_KEY.to_compact() {
2572 452 : tracing::debug!("Checkpoint key added to pending with size {val_serialized_size}");
2573 547640 : }
2574 572396 : }
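     :
     : // Editorial note: the replace-in-place branch above means repeated writes to
     : // the same key at the same LSN (e.g. a synthesized zero page followed by the
     : // real page image within one record) keep only the last value, while
     : // pending_metadata_bytes remains an accurate running total.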
2575 :
2576 4 : fn delete(&mut self, key_range: Range<Key>) {
2577 4 : trace!("DELETE {}-{}", key_range.start, key_range.end);
2578 4 : self.pending_deletions.push((key_range, self.lsn));
2579 4 : }
2580 : }
2581 :
2582 : /// Statistics for a DatadirModification.
2583 : #[derive(Default)]
2584 : pub struct DatadirModificationStats {
2585 : pub metadata_images: u64,
2586 : pub metadata_deltas: u64,
2587 : pub data_images: u64,
2588 : pub data_deltas: u64,
2589 : }
2590 :
2591 : /// This struct facilitates accessing either a committed key from the timeline at a
2592 : /// specific LSN, or the latest uncommitted key from a pending modification.
2593 : ///
2594 : /// During WAL ingestion, the records from multiple LSNs may be batched in the same
2595 : /// modification before being flushed to the timeline. Hence, the routines in WalIngest
2596 : /// need to look up the keys in the modification first before looking them up in the
2597 : /// timeline, so as not to miss the latest updates.
2598 : #[derive(Clone, Copy)]
2599 : pub enum Version<'a> {
2600 : Lsn(Lsn),
2601 : Modified(&'a DatadirModification<'a>),
2602 : }
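     :
     : // Usage sketch (editorial): walingest reads through the in-flight modification
     : // so it observes its own uncommitted metadata writes, as in
     : // create_relation_if_required above:
     : //
     : //     let exists = tline
     : //         .get_rel_exists(rel, Version::Modified(&modification), &ctx)
     : //         .await?;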
2603 :
2604 : impl Version<'_> {
2605 10352 : async fn get(
2606 10352 : &self,
2607 10352 : timeline: &Timeline,
2608 10352 : key: Key,
2609 10352 : ctx: &RequestContext,
2610 10352 : ) -> Result<Bytes, PageReconstructError> {
2611 10352 : match self {
2612 10312 : Version::Lsn(lsn) => timeline.get(key, *lsn, ctx).await,
2613 40 : Version::Modified(modification) => modification.get(key, ctx).await,
2614 : }
2615 10352 : }
2616 :
2617 : /// Get a key from the sparse keyspace. Automatically converts the missing key error
2618 : /// and the empty value into None.
2619 0 : async fn sparse_get(
2620 0 : &self,
2621 0 : timeline: &Timeline,
2622 0 : key: Key,
2623 0 : ctx: &RequestContext,
2624 0 : ) -> Result<Option<Bytes>, PageReconstructError> {
2625 0 : let val = self.get(timeline, key, ctx).await;
2626 0 : match val {
2627 0 : Ok(val) if val.is_empty() => Ok(None),
2628 0 : Ok(val) => Ok(Some(val)),
2629 0 : Err(PageReconstructError::MissingKey(_)) => Ok(None),
2630 0 : Err(e) => Err(e),
2631 : }
2632 0 : }
2633 :
2634 71240 : fn get_lsn(&self) -> Lsn {
2635 71240 : match self {
2636 59148 : Version::Lsn(lsn) => *lsn,
2637 12092 : Version::Modified(modification) => modification.lsn,
2638 : }
2639 71240 : }
2640 : }
2641 :
2642 : //--- Metadata structs stored in key-value pairs in the repository.
2643 :
2644 0 : #[derive(Debug, Serialize, Deserialize)]
2645 : pub(crate) struct DbDirectory {
2646 : // (spcnode, dbnode) -> (do relmapper and PG_VERSION files exist)
2647 : pub(crate) dbdirs: HashMap<(Oid, Oid), bool>,
2648 : }
2649 :
2650 : // The format of TwoPhaseDirectory changed in PostgreSQL v17, because the filenames of
2651 : // pg_twophase files was expanded from 32-bit XIDs to 64-bit XIDs. Previously, the files
2652 : // were named like "pg_twophase/000002E5", now they're like
2653 : // "pg_twophsae/0000000A000002E4".
2654 :
2655 0 : #[derive(Debug, Serialize, Deserialize)]
2656 : pub(crate) struct TwoPhaseDirectory {
2657 : pub(crate) xids: HashSet<TransactionId>,
2658 : }
2659 :
2660 0 : #[derive(Debug, Serialize, Deserialize)]
2661 : struct TwoPhaseDirectoryV17 {
2662 : xids: HashSet<u64>,
2663 : }
2664 :
2665 0 : #[derive(Debug, Serialize, Deserialize, Default)]
2666 : pub(crate) struct RelDirectory {
2667 : // Set of relations that exist. (relfilenode, forknum)
2668 : //
2669 : // TODO: Store it as a btree or radix tree or something else that spans multiple
2670 : // key-value pairs, if you have a lot of relations
2671 : pub(crate) rels: HashSet<(Oid, u8)>,
2672 : }
2673 :
2674 0 : #[derive(Debug, Serialize, Deserialize)]
2675 : struct RelSizeEntry {
2676 : nblocks: u32,
2677 : }
2678 :
2679 0 : #[derive(Debug, Serialize, Deserialize, Default)]
2680 : pub(crate) struct SlruSegmentDirectory {
2681 : // Set of SLRU segments that exist.
2682 : pub(crate) segments: HashSet<u32>,
2683 : }
2684 :
2685 : #[derive(Copy, Clone, PartialEq, Eq, Debug, enum_map::Enum)]
2686 : #[repr(u8)]
2687 : pub(crate) enum DirectoryKind {
2688 : Db,
2689 : TwoPhase,
2690 : Rel,
2691 : AuxFiles,
2692 : SlruSegment(SlruKind),
2693 : RelV2,
2694 : }
2695 :
2696 : impl DirectoryKind {
2697 : pub(crate) const KINDS_NUM: usize = <DirectoryKind as Enum>::LENGTH;
2698 17968 : pub(crate) fn offset(&self) -> usize {
2699 17968 : self.into_usize()
2700 17968 : }
2701 : }
2702 :
2703 : static ZERO_PAGE: Bytes = Bytes::from_static(&[0u8; BLCKSZ as usize]);
2704 :
2705 : #[allow(clippy::bool_assert_comparison)]
2706 : #[cfg(test)]
2707 : mod tests {
2708 : use hex_literal::hex;
2709 : use pageserver_api::models::ShardParameters;
2710 : use pageserver_api::shard::ShardStripeSize;
2711 : use utils::id::TimelineId;
2712 : use utils::shard::{ShardCount, ShardNumber};
2713 :
2714 : use super::*;
2715 : use crate::DEFAULT_PG_VERSION;
2716 : use crate::tenant::harness::TenantHarness;
2717 :
2718 : /// Test a round trip of aux file updates, from DatadirModification to reading back from the Timeline
2719 : #[tokio::test]
2720 4 : async fn aux_files_round_trip() -> anyhow::Result<()> {
2721 4 : let name = "aux_files_round_trip";
2722 4 : let harness = TenantHarness::create(name).await?;
2723 4 :
2724 4 : pub const TIMELINE_ID: TimelineId =
2725 4 : TimelineId::from_array(hex!("11223344556677881122334455667788"));
2726 4 :
2727 4 : let (tenant, ctx) = harness.load().await;
2728 4 : let tline = tenant
2729 4 : .create_empty_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
2730 4 : .await?;
2731 4 : let tline = tline.raw_timeline().unwrap();
2732 4 :
2733 4 : // First modification: insert two keys
2734 4 : let mut modification = tline.begin_modification(Lsn(0x1000));
2735 4 : modification.put_file("foo/bar1", b"content1", &ctx).await?;
2736 4 : modification.set_lsn(Lsn(0x1008))?;
2737 4 : modification.put_file("foo/bar2", b"content2", &ctx).await?;
2738 4 : modification.commit(&ctx).await?;
2739 4 : let expect_1008 = HashMap::from([
2740 4 : ("foo/bar1".to_string(), Bytes::from_static(b"content1")),
2741 4 : ("foo/bar2".to_string(), Bytes::from_static(b"content2")),
2742 4 : ]);
2743 4 :
2744 4 : let io_concurrency = IoConcurrency::spawn_for_test();
2745 4 :
2746 4 : let readback = tline
2747 4 : .list_aux_files(Lsn(0x1008), &ctx, io_concurrency.clone())
2748 4 : .await?;
2749 4 : assert_eq!(readback, expect_1008);
2750 4 :
2751 4 : // Second modification: update one key, remove the other
2752 4 : let mut modification = tline.begin_modification(Lsn(0x2000));
2753 4 : modification.put_file("foo/bar1", b"content3", &ctx).await?;
2754 4 : modification.set_lsn(Lsn(0x2008))?;
2755 4 : modification.put_file("foo/bar2", b"", &ctx).await?;
2756 4 : modification.commit(&ctx).await?;
2757 4 : let expect_2008 =
2758 4 : HashMap::from([("foo/bar1".to_string(), Bytes::from_static(b"content3"))]);
2759 4 :
2760 4 : let readback = tline
2761 4 : .list_aux_files(Lsn(0x2008), &ctx, io_concurrency.clone())
2762 4 : .await?;
2763 4 : assert_eq!(readback, expect_2008);
2764 4 :
2765 4 : // Reading back in time works
2766 4 : let readback = tline
2767 4 : .list_aux_files(Lsn(0x1008), &ctx, io_concurrency.clone())
2768 4 : .await?;
2769 4 : assert_eq!(readback, expect_1008);
2770 4 :
2771 4 : Ok(())
2772 4 : }
2773 :
2774 : #[test]
2775 4 : fn gap_finding() {
2776 4 : let rel = RelTag {
2777 4 : spcnode: 1663,
2778 4 : dbnode: 208101,
2779 4 : relnode: 2620,
2780 4 : forknum: 0,
2781 4 : };
2782 4 : let base_blkno = 1;
2783 4 :
2784 4 : let base_key = rel_block_to_key(rel, base_blkno);
2785 4 : let before_base_key = rel_block_to_key(rel, base_blkno - 1);
2786 4 :
2787 4 : let shard = ShardIdentity::unsharded();
2788 4 :
2789 4 : let mut previous_nblocks = 0;
2790 44 : for i in 0..10 {
2791 40 : let crnt_blkno = base_blkno + i;
2792 40 : let gaps = DatadirModification::find_gaps(rel, crnt_blkno, previous_nblocks, &shard);
2793 40 :
2794 40 : previous_nblocks = crnt_blkno + 1;
2795 40 :
2796 40 : if i == 0 {
2797 : // The first block we write is 1, so we should find the gap.
2798 4 : assert_eq!(gaps.unwrap(), KeySpace::single(before_base_key..base_key));
2799 : } else {
2800 36 : assert!(gaps.is_none());
2801 : }
2802 : }
2803 :
2804 : // This is an update to an already existing block. No gaps here.
2805 4 : let update_blkno = 5;
2806 4 : let gaps = DatadirModification::find_gaps(rel, update_blkno, previous_nblocks, &shard);
2807 4 : assert!(gaps.is_none());
2808 :
2809 : // This is an update past the current end block.
2810 4 : let after_gap_blkno = 20;
2811 4 : let gaps = DatadirModification::find_gaps(rel, after_gap_blkno, previous_nblocks, &shard);
2812 4 :
2813 4 : let gap_start_key = rel_block_to_key(rel, previous_nblocks);
2814 4 : let after_gap_key = rel_block_to_key(rel, after_gap_blkno);
2815 4 : assert_eq!(
2816 4 : gaps.unwrap(),
2817 4 : KeySpace::single(gap_start_key..after_gap_key)
2818 4 : );
2819 4 : }
2820 :
2821 : #[test]
2822 4 : fn sharded_gap_finding() {
2823 4 : let rel = RelTag {
2824 4 : spcnode: 1663,
2825 4 : dbnode: 208101,
2826 4 : relnode: 2620,
2827 4 : forknum: 0,
2828 4 : };
2829 4 :
2830 4 : let first_blkno = 6;
2831 4 :
2832 4 : // This shard will get the even blocks
2833 4 : let shard = ShardIdentity::from_params(
2834 4 : ShardNumber(0),
2835 4 : &ShardParameters {
2836 4 : count: ShardCount(2),
2837 4 : stripe_size: ShardStripeSize(1),
2838 4 : },
2839 4 : );
2840 4 :
2841 4 : // Only keys belonging to this shard are considered as gaps.
2842 4 : let mut previous_nblocks = 0;
2843 4 : let gaps =
2844 4 : DatadirModification::find_gaps(rel, first_blkno, previous_nblocks, &shard).unwrap();
2845 4 : assert!(!gaps.ranges.is_empty());
2846 12 : for gap_range in gaps.ranges {
2847 8 : let mut k = gap_range.start;
2848 16 : while k != gap_range.end {
2849 8 : assert_eq!(shard.get_shard_number(&k), shard.number);
2850 8 : k = k.next();
2851 : }
2852 : }
2853 :
2854 4 : previous_nblocks = first_blkno;
2855 4 :
2856 4 : let update_blkno = 2;
2857 4 : let gaps = DatadirModification::find_gaps(rel, update_blkno, previous_nblocks, &shard);
2858 4 : assert!(gaps.is_none());
2859 4 : }
2860 :
2861 : /*
2862 : fn assert_current_logical_size<R: Repository>(timeline: &DatadirTimeline<R>, lsn: Lsn) {
2863 : let incremental = timeline.get_current_logical_size();
2864 : let non_incremental = timeline
2865 : .get_current_logical_size_non_incremental(lsn)
2866 : .unwrap();
2867 : assert_eq!(incremental, non_incremental);
2868 : }
2869 : */
2870 :
2871 : /*
2872 : ///
2873 : /// Test list_rels() function, with branches and dropped relations
2874 : ///
2875 : #[test]
2876 : fn test_list_rels_drop() -> Result<()> {
2877 : let repo = RepoHarness::create("test_list_rels_drop")?.load();
2878 : let tline = create_empty_timeline(repo, TIMELINE_ID)?;
2879 : const TESTDB: u32 = 111;
2880 :
2881 : // Import initial dummy checkpoint record, otherwise the get_timeline() call
2882 : // after branching fails below
2883 : let mut writer = tline.begin_record(Lsn(0x10));
2884 : writer.put_checkpoint(ZERO_CHECKPOINT.clone())?;
2885 : writer.finish()?;
2886 :
2887 : // Create a relation on the timeline
2888 : let mut writer = tline.begin_record(Lsn(0x20));
2889 : writer.put_rel_page_image(TESTREL_A, 0, TEST_IMG("foo blk 0 at 2"))?;
2890 : writer.finish()?;
2891 :
2892 : let writer = tline.begin_record(Lsn(0x00));
2893 : writer.finish()?;
2894 :
2895 : // Check that list_rels() lists it after LSN 2, but no before it
2896 : assert!(!tline.list_rels(0, TESTDB, Lsn(0x10))?.contains(&TESTREL_A));
2897 : assert!(tline.list_rels(0, TESTDB, Lsn(0x20))?.contains(&TESTREL_A));
2898 : assert!(tline.list_rels(0, TESTDB, Lsn(0x30))?.contains(&TESTREL_A));
2899 :
2900 : // Create a branch, check that the relation is visible there
2901 : repo.branch_timeline(&tline, NEW_TIMELINE_ID, Lsn(0x30))?;
2902 : let newtline = match repo.get_timeline(NEW_TIMELINE_ID)?.local_timeline() {
2903 : Some(timeline) => timeline,
2904 : None => panic!("Should have a local timeline"),
2905 : };
2906 : let newtline = DatadirTimelineImpl::new(newtline);
2907 : assert!(newtline
2908 : .list_rels(0, TESTDB, Lsn(0x30))?
2909 : .contains(&TESTREL_A));
2910 :
2911 : // Drop it on the branch
2912 : let mut new_writer = newtline.begin_record(Lsn(0x40));
2913 : new_writer.drop_relation(TESTREL_A)?;
2914 : new_writer.finish()?;
2915 :
2916 : // Check that it's no longer listed on the branch after the point where it was dropped
2917 : assert!(newtline
2918 : .list_rels(0, TESTDB, Lsn(0x30))?
2919 : .contains(&TESTREL_A));
2920 : assert!(!newtline
2921 : .list_rels(0, TESTDB, Lsn(0x40))?
2922 : .contains(&TESTREL_A));
2923 :
2924 : // Run checkpoint and garbage collection and check that it's still not visible
2925 : newtline.checkpoint(CheckpointConfig::Forced)?;
2926 : repo.gc_iteration(Some(NEW_TIMELINE_ID), 0, true)?;
2927 :
2928 : assert!(!newtline
2929 : .list_rels(0, TESTDB, Lsn(0x40))?
2930 : .contains(&TESTREL_A));
2931 :
2932 : Ok(())
2933 : }
2934 : */
2935 :
2936 : /*
2937 : #[test]
2938 : fn test_read_beyond_eof() -> Result<()> {
2939 : let repo = RepoHarness::create("test_read_beyond_eof")?.load();
2940 : let tline = create_test_timeline(repo, TIMELINE_ID)?;
2941 :
2942 : make_some_layers(&tline, Lsn(0x20))?;
2943 : let mut writer = tline.begin_record(Lsn(0x60));
2944 : walingest.put_rel_page_image(
2945 : &mut writer,
2946 : TESTREL_A,
2947 : 0,
2948 : TEST_IMG(&format!("foo blk 0 at {}", Lsn(0x60))),
2949 : )?;
2950 : writer.finish()?;
2951 :
2952 : // Test read before rel creation. Should error out.
2953 : assert!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x10), false).is_err());
2954 :
2955 : // Read block beyond end of relation at different points in time.
2956 : // These reads should fall into different delta, image, and in-memory layers.
2957 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x20), false)?, ZERO_PAGE);
2958 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x25), false)?, ZERO_PAGE);
2959 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x30), false)?, ZERO_PAGE);
2960 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x35), false)?, ZERO_PAGE);
2961 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x40), false)?, ZERO_PAGE);
2962 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x45), false)?, ZERO_PAGE);
2963 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x50), false)?, ZERO_PAGE);
2964 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x55), false)?, ZERO_PAGE);
2965 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x60), false)?, ZERO_PAGE);
2966 :
2967 : // Test on an in-memory layer with no preceding layer
2968 : let mut writer = tline.begin_record(Lsn(0x70));
2969 : walingest.put_rel_page_image(
2970 : &mut writer,
2971 : TESTREL_B,
2972 : 0,
2973 : TEST_IMG(&format!("foo blk 0 at {}", Lsn(0x70))),
2974 : )?;
2975 : writer.finish()?;
2976 :
2977 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_B, 1, Lsn(0x70), false)?, ZERO_PAGE);
2978 :
2979 : Ok(())
2980 : }
2981 : */
2982 : }
|