//!
//! This provides an abstraction to store PostgreSQL relations and other files
//! in the key-value store that implements the Repository interface.
//!
//! (TODO: The line between PUT-functions here and walingest.rs is a bit blurry, as
//! walingest.rs handles a few things like implicit relation creation and extension.
//! Clarify that.)
//!
use std::collections::{HashMap, HashSet, hash_map};
use std::ops::{ControlFlow, Range};

use crate::walingest::{WalIngestError, WalIngestErrorKind};
use crate::{PERF_TRACE_TARGET, ensure_walingest};
use anyhow::Context;
use bytes::{Buf, Bytes, BytesMut};
use enum_map::Enum;
use pageserver_api::key::{
    AUX_FILES_KEY, CHECKPOINT_KEY, CONTROLFILE_KEY, CompactKey, DBDIR_KEY, Key, RelDirExists,
    TWOPHASEDIR_KEY, dbdir_key_range, rel_block_to_key, rel_dir_to_key, rel_key_range,
    rel_size_to_key, rel_tag_sparse_key, rel_tag_sparse_key_range, relmap_file_key,
    repl_origin_key, repl_origin_key_range, slru_block_to_key, slru_dir_to_key,
    slru_segment_key_range, slru_segment_size_to_key, twophase_file_key, twophase_key_range,
};
use pageserver_api::keyspace::{KeySpaceRandomAccum, SparseKeySpace};
use pageserver_api::models::RelSizeMigration;
use pageserver_api::record::NeonWalRecord;
use pageserver_api::reltag::{BlockNumber, RelTag, SlruKind};
use pageserver_api::shard::ShardIdentity;
use pageserver_api::value::Value;
use postgres_ffi::relfile_utils::{FSM_FORKNUM, VISIBILITYMAP_FORKNUM};
use postgres_ffi::{BLCKSZ, Oid, RepOriginId, TimestampTz, TransactionId};
use serde::{Deserialize, Serialize};
use strum::IntoEnumIterator;
use tokio_util::sync::CancellationToken;
use tracing::{debug, info, info_span, trace, warn};
use utils::bin_ser::{BeSer, DeserializeError};
use utils::lsn::Lsn;
use utils::pausable_failpoint;
use wal_decoder::serialized_batch::{SerializedValueBatch, ValueMeta};

use super::tenant::{PageReconstructError, Timeline};
use crate::aux_file;
use crate::context::{PerfInstrumentFutureExt, RequestContext};
use crate::keyspace::{KeySpace, KeySpaceAccum};
use crate::metrics::{
    RELSIZE_CACHE_ENTRIES, RELSIZE_CACHE_HITS, RELSIZE_CACHE_MISSES, RELSIZE_CACHE_MISSES_OLD,
};
use crate::span::{
    debug_assert_current_span_has_tenant_and_timeline_id,
    debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id,
};
use crate::tenant::storage_layer::IoConcurrency;
use crate::tenant::timeline::{GetVectoredError, VersionedKeySpaceQuery};

/// Max delta records appended to the AUX_FILES_KEY (for aux v1). The write path will write a full image once this threshold is reached.
pub const MAX_AUX_FILE_DELTAS: usize = 1024;

/// Max number of aux-file-related delta layers. The compaction will create a new image layer once this threshold is reached.
pub const MAX_AUX_FILE_V2_DELTAS: usize = 16;

#[derive(Debug)]
pub enum LsnForTimestamp {
    /// Found commits both before and after the given timestamp
    Present(Lsn),

    /// Found no commits after the given timestamp: the newest data
    /// in the branch is older than the given timestamp.
    ///
    /// All commits <= LSN happened before the given timestamp.
    Future(Lsn),

    /// The queried timestamp is past the horizon we can look back to (PITR).
    ///
    /// All commits > LSN happened after the given timestamp,
    /// but any commits < LSN might have happened before or after
    /// the given timestamp. We don't know because no data before
    /// the given LSN is available.
    Past(Lsn),

    /// We have found no commit with a timestamp,
    /// so we can't return anything meaningful.
    ///
    /// The associated LSN is the lower bound value we can safely
    /// create branches on, but no statement is made about whether it is
    /// older or newer than the timestamp.
    ///
    /// This variant can e.g. be returned right after a
    /// cluster import.
    NoData(Lsn),
}

#[derive(Debug, thiserror::Error)]
pub(crate) enum CalculateLogicalSizeError {
    #[error("cancelled")]
    Cancelled,

    /// Something went wrong while reading the metadata we use to calculate logical size.
    /// Note that cancellation variants of `PageReconstructError` are transformed to [`Self::Cancelled`]
    /// in the `From` implementation for this variant.
    #[error(transparent)]
    PageRead(PageReconstructError),

    /// Something went wrong deserializing metadata that we read to calculate logical size
    #[error("decode error: {0}")]
    Decode(#[from] DeserializeError),
}

#[derive(Debug, thiserror::Error)]
pub(crate) enum CollectKeySpaceError {
    #[error(transparent)]
    Decode(#[from] DeserializeError),
    #[error(transparent)]
    PageRead(PageReconstructError),
    #[error("cancelled")]
    Cancelled,
}

impl From<PageReconstructError> for CollectKeySpaceError {
    fn from(err: PageReconstructError) -> Self {
        match err {
            PageReconstructError::Cancelled => Self::Cancelled,
            err => Self::PageRead(err),
        }
    }
}

impl From<PageReconstructError> for CalculateLogicalSizeError {
    fn from(pre: PageReconstructError) -> Self {
        match pre {
            PageReconstructError::Cancelled => Self::Cancelled,
            _ => Self::PageRead(pre),
        }
    }
}

#[derive(Debug, thiserror::Error)]
pub enum RelationError {
    #[error("invalid relnode")]
    InvalidRelnode,
}

///
/// This impl provides all the functionality to store PostgreSQL relations, SLRUs,
/// and other special kinds of files, in a versioned key-value store. The
/// Timeline struct provides the key-value store.
///
/// This is a separate impl so that we can easily include all these functions in a
/// Timeline implementation; it might be moved into a separate struct later.
impl Timeline {
    /// Start ingesting a WAL record, or other atomic modification of
    /// the timeline.
    ///
    /// This provides a transaction-like interface to perform a bunch
    /// of modifications atomically.
    ///
    /// To ingest a WAL record, call begin_modification(lsn) to get a
    /// DatadirModification object. Use the functions in the object to
    /// modify the repository state, updating all the pages and metadata
    /// that the WAL record affects. When you're done, call commit() to
    /// commit the changes.
    ///
    /// The LSN stored in the modification is advanced by `ingest_record` and
    /// is used by `commit()` to update `last_record_lsn`.
    ///
    /// Calling commit() will flush all the changes and reset the state,
    /// so the `DatadirModification` struct can be reused to perform the next modification.
    ///
    /// Note that any pending modifications you make through the
    /// modification object won't be visible to calls to the 'get' and list
    /// functions of the timeline until you finish! And if you update the
    /// same page twice, the last update wins.
    ///
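    /// A minimal usage sketch (illustrative only; `timeline`, `lsn` and a
    /// `ctx: RequestContext` are assumed to be in scope, and the actual
    /// put-calls are elided):
    ///
    /// ```ignore
    /// let mut modification = timeline.begin_modification(lsn);
    /// // ... call the put-functions for everything this WAL record touches ...
    /// modification.commit(&ctx).await?;
    /// ```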
    pub fn begin_modification(&self, lsn: Lsn) -> DatadirModification
    where
        Self: Sized,
    {
        DatadirModification {
            tline: self,
            pending_lsns: Vec::new(),
            pending_metadata_pages: HashMap::new(),
            pending_data_batch: None,
            pending_deletions: Vec::new(),
            pending_nblocks: 0,
            pending_directory_entries: Vec::new(),
            pending_metadata_bytes: 0,
            lsn,
        }
    }

    //------------------------------------------------------------------------------
    // Public GET functions
    //------------------------------------------------------------------------------

    /// Look up given page version.
    pub(crate) async fn get_rel_page_at_lsn(
        &self,
        tag: RelTag,
        blknum: BlockNumber,
        version: Version<'_>,
        ctx: &RequestContext,
        io_concurrency: IoConcurrency,
    ) -> Result<Bytes, PageReconstructError> {
        match version {
            Version::Lsn(effective_lsn) => {
                let pages: smallvec::SmallVec<[_; 1]> = smallvec::smallvec![(tag, blknum)];
                let res = self
                    .get_rel_page_at_lsn_batched(
                        pages.iter().map(|(tag, blknum)| {
                            (tag, blknum, effective_lsn, ctx.attached_child())
                        }),
                        io_concurrency.clone(),
                        ctx,
                    )
                    .await;
                assert_eq!(res.len(), 1);
                res.into_iter().next().unwrap()
            }
            Version::Modified(modification) => {
                if tag.relnode == 0 {
                    return Err(PageReconstructError::Other(
                        RelationError::InvalidRelnode.into(),
                    ));
                }

                let nblocks = self.get_rel_size(tag, version, ctx).await?;
                if blknum >= nblocks {
                    debug!(
                        "read beyond EOF at {} blk {} at {}, size is {}: returning all-zeros page",
                        tag,
                        blknum,
                        version.get_lsn(),
                        nblocks
                    );
                    return Ok(ZERO_PAGE.clone());
                }

                let key = rel_block_to_key(tag, blknum);
                modification.get(key, ctx).await
            }
        }
    }

    /// Like [`Self::get_rel_page_at_lsn`], but returns a batch of pages.
    ///
    /// The ordering of the returned vec corresponds to the ordering of `pages`.
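    ///
    /// A hedged sketch of a call site (hypothetical; `rel`, `lsn`, `io_concurrency`
    /// and `ctx` are assumed to be in scope):
    ///
    /// ```ignore
    /// let pages = [
    ///     (&rel, &0u32, lsn, ctx.attached_child()),
    ///     (&rel, &1u32, lsn, ctx.attached_child()),
    /// ];
    /// let results = timeline
    ///     .get_rel_page_at_lsn_batched(pages.into_iter(), io_concurrency, &ctx)
    ///     .await;
    /// assert_eq!(results.len(), 2); // one Result per requested page, in request order
    /// ```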
    pub(crate) async fn get_rel_page_at_lsn_batched(
        &self,
        pages: impl ExactSizeIterator<Item = (&RelTag, &BlockNumber, Lsn, RequestContext)>,
        io_concurrency: IoConcurrency,
        ctx: &RequestContext,
    ) -> Vec<Result<Bytes, PageReconstructError>> {
        debug_assert_current_span_has_tenant_and_timeline_id();

        let mut slots_filled = 0;
        let page_count = pages.len();

        // Would be nice to use smallvec here but it doesn't provide the spare_capacity_mut() API.
        let mut result = Vec::with_capacity(pages.len());
        let result_slots = result.spare_capacity_mut();

        let mut keys_slots: HashMap<Key, smallvec::SmallVec<[(usize, RequestContext); 1]>> =
            HashMap::with_capacity(pages.len());

        let mut req_keyspaces: HashMap<Lsn, KeySpaceRandomAccum> =
            HashMap::with_capacity(pages.len());

        for (response_slot_idx, (tag, blknum, lsn, ctx)) in pages.enumerate() {
            if tag.relnode == 0 {
                result_slots[response_slot_idx].write(Err(PageReconstructError::Other(
                    RelationError::InvalidRelnode.into(),
                )));

                slots_filled += 1;
                continue;
            }

            let nblocks = match self
                .get_rel_size(*tag, Version::Lsn(lsn), &ctx)
                .maybe_perf_instrument(&ctx, |crnt_perf_span| {
                    info_span!(
                        target: PERF_TRACE_TARGET,
                        parent: crnt_perf_span,
                        "GET_REL_SIZE",
                        reltag=%tag,
                        lsn=%lsn,
                    )
                })
                .await
            {
                Ok(nblocks) => nblocks,
                Err(err) => {
                    result_slots[response_slot_idx].write(Err(err));
                    slots_filled += 1;
                    continue;
                }
            };

            if *blknum >= nblocks {
                debug!(
                    "read beyond EOF at {} blk {} at {}, size is {}: returning all-zeros page",
                    tag, blknum, lsn, nblocks
                );
                result_slots[response_slot_idx].write(Ok(ZERO_PAGE.clone()));
                slots_filled += 1;
                continue;
            }

            let key = rel_block_to_key(*tag, *blknum);

            let key_slots = keys_slots.entry(key).or_default();
            key_slots.push((response_slot_idx, ctx));

            let acc = req_keyspaces.entry(lsn).or_default();
            acc.add_key(key);
        }

        let query: Vec<(Lsn, KeySpace)> = req_keyspaces
            .into_iter()
            .map(|(lsn, acc)| (lsn, acc.to_keyspace()))
            .collect();

        let query = VersionedKeySpaceQuery::scattered(query);
        let res = self
            .get_vectored(query, io_concurrency, ctx)
            .maybe_perf_instrument(ctx, |current_perf_span| {
                info_span!(
                    target: PERF_TRACE_TARGET,
                    parent: current_perf_span,
                    "GET_BATCH",
                    batch_size = %page_count,
                )
            })
            .await;

        match res {
            Ok(results) => {
                for (key, res) in results {
                    let mut key_slots = keys_slots.remove(&key).unwrap().into_iter();
                    let (first_slot, first_req_ctx) = key_slots.next().unwrap();

                    for (slot, req_ctx) in key_slots {
                        let clone = match &res {
                            Ok(buf) => Ok(buf.clone()),
                            Err(err) => Err(match err {
                                PageReconstructError::Cancelled => PageReconstructError::Cancelled,

                                x @ PageReconstructError::Other(_)
                                | x @ PageReconstructError::AncestorLsnTimeout(_)
                                | x @ PageReconstructError::WalRedo(_)
                                | x @ PageReconstructError::MissingKey(_) => {
                                    PageReconstructError::Other(anyhow::anyhow!(
                                        "there was more than one request for this key in the batch, error logged once: {x:?}"
                                    ))
                                }
                            }),
                        };

                        result_slots[slot].write(clone);
                        // There is no standardized way to express that the batched span followed from N request spans.
                        // So, abuse the system and mark the request contexts as follows_from the batch span, so we get
                        // some linkage in our trace viewer. It allows us to answer: which GET_VECTORED did this GET_PAGE wait for.
                        req_ctx.perf_follows_from(ctx);
                        slots_filled += 1;
                    }

                    result_slots[first_slot].write(res);
                    first_req_ctx.perf_follows_from(ctx);
                    slots_filled += 1;
                }
            }
            Err(err) => {
                // this cannot really happen because get_vectored only errors globally on invalid LSN or too large batch size
                // (We enforce the max batch size outside of this function, in the code that constructs the batch request.)
                for (slot, req_ctx) in keys_slots.values().flatten() {
                    // this whole `match` is a lot like `From<GetVectoredError> for PageReconstructError`
                    // but without taking ownership of the GetVectoredError
                    let err = match &err {
                        GetVectoredError::Cancelled => Err(PageReconstructError::Cancelled),
                        // TODO: restructure get_vectored API to make this error per-key
                        GetVectoredError::MissingKey(err) => {
                            Err(PageReconstructError::Other(anyhow::anyhow!(
                                "whole vectored get request failed because one or more of the requested keys were missing: {err:?}"
                            )))
                        }
                        // TODO: restructure get_vectored API to make this error per-key
                        GetVectoredError::GetReadyAncestorError(err) => {
                            Err(PageReconstructError::Other(anyhow::anyhow!(
                                "whole vectored get request failed because one or more key required ancestor that wasn't ready: {err:?}"
                            )))
                        }
                        // TODO: restructure get_vectored API to make this error per-key
                        GetVectoredError::Other(err) => Err(PageReconstructError::Other(
                            anyhow::anyhow!("whole vectored get request failed: {err:?}"),
                        )),
                        // TODO: we can prevent this error class by moving this check into the type system
                        GetVectoredError::InvalidLsn(e) => {
                            Err(anyhow::anyhow!("invalid LSN: {e:?}").into())
                        }
                        // NB: this should never happen in practice because we limit MAX_GET_VECTORED_KEYS
                        // TODO: we can prevent this error class by moving this check into the type system
                        GetVectoredError::Oversized(err) => {
                            Err(anyhow::anyhow!("batching oversized: {err:?}").into())
                        }
                    };

                    req_ctx.perf_follows_from(ctx);
                    result_slots[*slot].write(err);
                }

                slots_filled += keys_slots.values().map(|slots| slots.len()).sum::<usize>();
            }
        };

        assert_eq!(slots_filled, page_count);
        // SAFETY:
        // 1. `result` and any of its uninit members are not read from until this point
        // 2. The length below is tracked at run-time and matches the number of requested pages.
        unsafe {
            result.set_len(page_count);
        }

        result
    }

    /// Get size of a database in blocks. This is only accurate on shard 0. It will undercount on
    /// other shards, by only accounting for relations the shard has pages for, and only accounting
    /// for pages up to the highest page number it has stored.
    pub(crate) async fn get_db_size(
        &self,
        spcnode: Oid,
        dbnode: Oid,
        version: Version<'_>,
        ctx: &RequestContext,
    ) -> Result<usize, PageReconstructError> {
        let mut total_blocks = 0;

        let rels = self.list_rels(spcnode, dbnode, version, ctx).await?;

        for rel in rels {
            let n_blocks = self.get_rel_size(rel, version, ctx).await?;
            total_blocks += n_blocks as usize;
        }
        Ok(total_blocks)
    }

    /// Get size of a relation file. The relation must exist, otherwise an error is returned.
    ///
    /// This is only accurate on shard 0. On other shards, it will return the size up to the highest
    /// page number stored in the shard.
    pub(crate) async fn get_rel_size(
        &self,
        tag: RelTag,
        version: Version<'_>,
        ctx: &RequestContext,
    ) -> Result<BlockNumber, PageReconstructError> {
        if tag.relnode == 0 {
            return Err(PageReconstructError::Other(
                RelationError::InvalidRelnode.into(),
            ));
        }

        if let Some(nblocks) = self.get_cached_rel_size(&tag, version.get_lsn()) {
            return Ok(nblocks);
        }

        if (tag.forknum == FSM_FORKNUM || tag.forknum == VISIBILITYMAP_FORKNUM)
            && !self.get_rel_exists(tag, version, ctx).await?
        {
            // FIXME: Postgres sometimes calls smgrcreate() to create
            // FSM, and smgrnblocks() on it immediately afterwards,
            // without extending it. Tolerate that by claiming that
            // any non-existent FSM fork has size 0.
            return Ok(0);
        }

        let key = rel_size_to_key(tag);
        let mut buf = version.get(self, key, ctx).await?;
        let nblocks = buf.get_u32_le();

        self.update_cached_rel_size(tag, version.get_lsn(), nblocks);

        Ok(nblocks)
    }

    /// Does the relation exist?
    ///
    /// Only shard 0 has a full view of the relations. Other shards only know about relations that
    /// the shard stores pages for.
    pub(crate) async fn get_rel_exists(
        &self,
        tag: RelTag,
        version: Version<'_>,
        ctx: &RequestContext,
    ) -> Result<bool, PageReconstructError> {
        if tag.relnode == 0 {
            return Err(PageReconstructError::Other(
                RelationError::InvalidRelnode.into(),
            ));
        }

        // first try to look up the relation in the cache
        if let Some(_nblocks) = self.get_cached_rel_size(&tag, version.get_lsn()) {
            return Ok(true);
        }
        // then check if the database was already initialized.
        // get_rel_exists can be called before dbdir is created.
        let buf = version.get(self, DBDIR_KEY, ctx).await?;
        let dbdirs = DbDirectory::des(&buf)?.dbdirs;
        if !dbdirs.contains_key(&(tag.spcnode, tag.dbnode)) {
            return Ok(false);
        }

        // Read path: first read the new reldir keyspace. Early return if the relation exists.
        // Otherwise, read the old reldir keyspace.
        // TODO: if IndexPart::rel_size_migration is `Migrated`, we only need to read from v2.

        if let RelSizeMigration::Migrated | RelSizeMigration::Migrating =
            self.get_rel_size_v2_status()
        {
            // fetch directory listing (new)
            let key = rel_tag_sparse_key(tag.spcnode, tag.dbnode, tag.relnode, tag.forknum);
            let buf = RelDirExists::decode_option(version.sparse_get(self, key, ctx).await?)
                .map_err(|_| PageReconstructError::Other(anyhow::anyhow!("invalid reldir key")))?;
            let exists_v2 = buf == RelDirExists::Exists;
            // Fast path: if the relation exists in the new format, return true.
            // TODO: we should have a verification mode that checks both keyspaces
            // to ensure the relation only exists in one of them.
            if exists_v2 {
                return Ok(true);
            }
        }

        // fetch directory listing (old)

        let key = rel_dir_to_key(tag.spcnode, tag.dbnode);
        let buf = version.get(self, key, ctx).await?;

        let dir = RelDirectory::des(&buf)?;
        let exists_v1 = dir.rels.contains(&(tag.relnode, tag.forknum));
        Ok(exists_v1)
    }

    /// Get a list of all existing relations in given tablespace and database.
    ///
    /// Only shard 0 has a full view of the relations. Other shards only know about relations that
    /// the shard stores pages for.
    ///
    /// # Cancel-Safety
    ///
    /// This method is cancellation-safe.
    pub(crate) async fn list_rels(
        &self,
        spcnode: Oid,
        dbnode: Oid,
        version: Version<'_>,
        ctx: &RequestContext,
    ) -> Result<HashSet<RelTag>, PageReconstructError> {
        // fetch directory listing (old)
        let key = rel_dir_to_key(spcnode, dbnode);
        let buf = version.get(self, key, ctx).await?;

        let dir = RelDirectory::des(&buf)?;
        let rels_v1: HashSet<RelTag> =
            HashSet::from_iter(dir.rels.iter().map(|(relnode, forknum)| RelTag {
                spcnode,
                dbnode,
                relnode: *relnode,
                forknum: *forknum,
            }));

        if let RelSizeMigration::Legacy = self.get_rel_size_v2_status() {
            return Ok(rels_v1);
        }

        // scan directory listing (new), merge with the old results
        let key_range = rel_tag_sparse_key_range(spcnode, dbnode);
        let io_concurrency = IoConcurrency::spawn_from_conf(
            self.conf,
            self.gate
                .enter()
                .map_err(|_| PageReconstructError::Cancelled)?,
        );
        let results = self
            .scan(
                KeySpace::single(key_range),
                version.get_lsn(),
                ctx,
                io_concurrency,
            )
            .await?;
        let mut rels = rels_v1;
        for (key, val) in results {
            let val = RelDirExists::decode(&val?)
                .map_err(|_| PageReconstructError::Other(anyhow::anyhow!("invalid reldir key")))?;
            assert_eq!(key.field6, 1);
            assert_eq!(key.field2, spcnode);
            assert_eq!(key.field3, dbnode);
            let tag = RelTag {
                spcnode,
                dbnode,
                relnode: key.field4,
                forknum: key.field5,
            };
            if val == RelDirExists::Removed {
                debug_assert!(!rels.contains(&tag), "removed reltag in v2");
                continue;
            }
            let did_not_contain = rels.insert(tag);
            debug_assert!(did_not_contain, "duplicate reltag in v2");
        }
        Ok(rels)
    }

    /// Get the whole SLRU segment
    pub(crate) async fn get_slru_segment(
        &self,
        kind: SlruKind,
        segno: u32,
        lsn: Lsn,
        ctx: &RequestContext,
    ) -> Result<Bytes, PageReconstructError> {
        assert!(self.tenant_shard_id.is_shard_zero());
        let n_blocks = self
            .get_slru_segment_size(kind, segno, Version::Lsn(lsn), ctx)
            .await?;

        let keyspace = KeySpace::single(
            slru_block_to_key(kind, segno, 0)..slru_block_to_key(kind, segno, n_blocks),
        );

        let batches = keyspace.partition(
            self.get_shard_identity(),
            Timeline::MAX_GET_VECTORED_KEYS * BLCKSZ as u64,
        );

        let io_concurrency = IoConcurrency::spawn_from_conf(
            self.conf,
            self.gate
                .enter()
                .map_err(|_| PageReconstructError::Cancelled)?,
        );

        let mut segment = BytesMut::with_capacity(n_blocks as usize * BLCKSZ as usize);
        for batch in batches.parts {
            let query = VersionedKeySpaceQuery::uniform(batch, lsn);
            let blocks = self
                .get_vectored(query, io_concurrency.clone(), ctx)
                .await?;

            for (_key, block) in blocks {
                let block = block?;
                segment.extend_from_slice(&block[..BLCKSZ as usize]);
            }
        }

        Ok(segment.freeze())
    }

    /// Get size of an SLRU segment
    pub(crate) async fn get_slru_segment_size(
        &self,
        kind: SlruKind,
        segno: u32,
        version: Version<'_>,
        ctx: &RequestContext,
    ) -> Result<BlockNumber, PageReconstructError> {
        assert!(self.tenant_shard_id.is_shard_zero());
        let key = slru_segment_size_to_key(kind, segno);
        let mut buf = version.get(self, key, ctx).await?;
        Ok(buf.get_u32_le())
    }

    /// Does the slru segment exist?
    pub(crate) async fn get_slru_segment_exists(
        &self,
        kind: SlruKind,
        segno: u32,
        version: Version<'_>,
        ctx: &RequestContext,
    ) -> Result<bool, PageReconstructError> {
        assert!(self.tenant_shard_id.is_shard_zero());
        // fetch directory listing
        let key = slru_dir_to_key(kind);
        let buf = version.get(self, key, ctx).await?;

        let dir = SlruSegmentDirectory::des(&buf)?;
        Ok(dir.segments.contains(&segno))
    }

    /// Locate the LSN such that all transactions that committed before
    /// 'search_timestamp' are visible, but nothing newer is.
    ///
    /// This is not exact. Commit timestamps are not guaranteed to be ordered,
    /// so it's not well defined which LSN you get if there were multiple commits
    /// "in flight" at that point in time.
    ///
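    /// A hedged sketch of interpreting the result (hypothetical caller; `timeline`,
    /// `ts`, `cancel` and `ctx` are assumed to be in scope):
    ///
    /// ```ignore
    /// match timeline.find_lsn_for_timestamp(ts, &cancel, &ctx).await? {
    ///     LsnForTimestamp::Present(lsn) => { /* commits on both sides; branch at `lsn` */ }
    ///     LsnForTimestamp::Future(lsn) => { /* everything committed before `ts` */ }
    ///     LsnForTimestamp::Past(lsn) | LsnForTimestamp::NoData(lsn) => { /* only a lower bound */ }
    /// }
    /// ```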
697 : ///
698 0 : pub(crate) async fn find_lsn_for_timestamp(
699 0 : &self,
700 0 : search_timestamp: TimestampTz,
701 0 : cancel: &CancellationToken,
702 0 : ctx: &RequestContext,
703 0 : ) -> Result<LsnForTimestamp, PageReconstructError> {
704 0 : pausable_failpoint!("find-lsn-for-timestamp-pausable");
705 :
706 0 : let gc_cutoff_lsn_guard = self.get_applied_gc_cutoff_lsn();
707 0 : let gc_cutoff_planned = {
708 0 : let gc_info = self.gc_info.read().unwrap();
709 0 : gc_info.min_cutoff()
710 0 : };
711 0 : // Usually the planned cutoff is newer than the cutoff of the last gc run,
712 0 : // but let's be defensive.
713 0 : let gc_cutoff = gc_cutoff_planned.max(*gc_cutoff_lsn_guard);
714 0 : // We use this method to figure out the branching LSN for the new branch, but the
715 0 : // GC cutoff could be before the branching point and we cannot create a new branch
716 0 : // with LSN < `ancestor_lsn`. Thus, pick the maximum of these two to be
717 0 : // on the safe side.
718 0 : let min_lsn = std::cmp::max(gc_cutoff, self.get_ancestor_lsn());
719 0 : let max_lsn = self.get_last_record_lsn();
720 0 :
721 0 : // LSNs are always 8-byte aligned. low/mid/high represent the
722 0 : // LSN divided by 8.
723 0 : let mut low = min_lsn.0 / 8;
724 0 : let mut high = max_lsn.0 / 8 + 1;
725 0 :
726 0 : let mut found_smaller = false;
727 0 : let mut found_larger = false;
728 :
729 0 : while low < high {
730 0 : if cancel.is_cancelled() {
731 0 : return Err(PageReconstructError::Cancelled);
732 0 : }
733 0 : // cannot overflow, high and low are both smaller than u64::MAX / 2
734 0 : let mid = (high + low) / 2;
735 :
736 0 : let cmp = match self
737 0 : .is_latest_commit_timestamp_ge_than(
738 0 : search_timestamp,
739 0 : Lsn(mid * 8),
740 0 : &mut found_smaller,
741 0 : &mut found_larger,
742 0 : ctx,
743 0 : )
744 0 : .await
745 : {
746 0 : Ok(res) => res,
747 0 : Err(PageReconstructError::MissingKey(e)) => {
748 0 : warn!(
749 0 : "Missing key while find_lsn_for_timestamp. Either we might have already garbage-collected that data or the key is really missing. Last error: {:#}",
750 : e
751 : );
752 : // Return that we didn't find any requests smaller than the LSN, and logging the error.
753 0 : return Ok(LsnForTimestamp::Past(min_lsn));
754 : }
755 0 : Err(e) => return Err(e),
756 : };
757 :
758 0 : if cmp {
759 0 : high = mid;
760 0 : } else {
761 0 : low = mid + 1;
762 0 : }
763 : }
764 :
765 : // If `found_smaller == true`, `low = t + 1` where `t` is the target LSN,
766 : // so the LSN of the last commit record before or at `search_timestamp`.
767 : // Remove one from `low` to get `t`.
768 : //
769 : // FIXME: it would be better to get the LSN of the previous commit.
770 : // Otherwise, if you restore to the returned LSN, the database will
771 : // include physical changes from later commits that will be marked
772 : // as aborted, and will need to be vacuumed away.
773 0 : let commit_lsn = Lsn((low - 1) * 8);
774 0 : match (found_smaller, found_larger) {
775 : (false, false) => {
776 : // This can happen if no commit records have been processed yet, e.g.
777 : // just after importing a cluster.
778 0 : Ok(LsnForTimestamp::NoData(min_lsn))
779 : }
780 : (false, true) => {
781 : // Didn't find any commit timestamps smaller than the request
782 0 : Ok(LsnForTimestamp::Past(min_lsn))
783 : }
784 0 : (true, _) if commit_lsn < min_lsn => {
785 0 : // the search above did set found_smaller to true but it never increased the lsn.
786 0 : // Then, low is still the old min_lsn, and the subtraction above gave a value
787 0 : // below the min_lsn. We should never do that.
788 0 : Ok(LsnForTimestamp::Past(min_lsn))
789 : }
790 : (true, false) => {
791 : // Only found commits with timestamps smaller than the request.
792 : // It's still a valid case for branch creation, return it.
793 : // And `update_gc_info()` ignores LSN for a `LsnForTimestamp::Future`
794 : // case, anyway.
795 0 : Ok(LsnForTimestamp::Future(commit_lsn))
796 : }
797 0 : (true, true) => Ok(LsnForTimestamp::Present(commit_lsn)),
798 : }
799 0 : }
800 :
801 : /// Subroutine of find_lsn_for_timestamp(). Returns true, if there are any
802 : /// commits that committed after 'search_timestamp', at LSN 'probe_lsn'.
803 : ///
804 : /// Additionally, sets 'found_smaller'/'found_Larger, if encounters any commits
805 : /// with a smaller/larger timestamp.
806 : ///
807 0 : pub(crate) async fn is_latest_commit_timestamp_ge_than(
808 0 : &self,
809 0 : search_timestamp: TimestampTz,
810 0 : probe_lsn: Lsn,
811 0 : found_smaller: &mut bool,
812 0 : found_larger: &mut bool,
813 0 : ctx: &RequestContext,
814 0 : ) -> Result<bool, PageReconstructError> {
815 0 : self.map_all_timestamps(probe_lsn, ctx, |timestamp| {
816 0 : if timestamp >= search_timestamp {
817 0 : *found_larger = true;
818 0 : return ControlFlow::Break(true);
819 0 : } else {
820 0 : *found_smaller = true;
821 0 : }
822 0 : ControlFlow::Continue(())
823 0 : })
824 0 : .await
825 0 : }
826 :
827 : /// Obtain the timestamp for the given lsn.
828 : ///
829 : /// If the lsn has no timestamps (e.g. no commits), returns None.
830 0 : pub(crate) async fn get_timestamp_for_lsn(
831 0 : &self,
832 0 : probe_lsn: Lsn,
833 0 : ctx: &RequestContext,
834 0 : ) -> Result<Option<TimestampTz>, PageReconstructError> {
835 0 : let mut max: Option<TimestampTz> = None;
836 0 : self.map_all_timestamps::<()>(probe_lsn, ctx, |timestamp| {
837 0 : if let Some(max_prev) = max {
838 0 : max = Some(max_prev.max(timestamp));
839 0 : } else {
840 0 : max = Some(timestamp);
841 0 : }
842 0 : ControlFlow::Continue(())
843 0 : })
844 0 : .await?;
845 :
846 0 : Ok(max)
847 0 : }
848 :
849 : /// Runs the given function on all the timestamps for a given lsn
850 : ///
851 : /// The return value is either given by the closure, or set to the `Default`
852 : /// impl's output.
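    ///
    /// A hedged usage sketch (hypothetical; `ts: TimestampTz` is assumed to be in
    /// scope), checking whether any commit happened at or after `ts`:
    ///
    /// ```ignore
    /// let found = self
    ///     .map_all_timestamps(probe_lsn, ctx, |t| {
    ///         if t >= ts {
    ///             ControlFlow::Break(true)
    ///         } else {
    ///             ControlFlow::Continue(())
    ///         }
    ///     })
    ///     .await?; // yields `false` (bool's Default) if nothing broke out of the loop
    /// ```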
    async fn map_all_timestamps<T: Default>(
        &self,
        probe_lsn: Lsn,
        ctx: &RequestContext,
        mut f: impl FnMut(TimestampTz) -> ControlFlow<T>,
    ) -> Result<T, PageReconstructError> {
        for segno in self
            .list_slru_segments(SlruKind::Clog, Version::Lsn(probe_lsn), ctx)
            .await?
        {
            let nblocks = self
                .get_slru_segment_size(SlruKind::Clog, segno, Version::Lsn(probe_lsn), ctx)
                .await?;

            let keyspace = KeySpace::single(
                slru_block_to_key(SlruKind::Clog, segno, 0)
                    ..slru_block_to_key(SlruKind::Clog, segno, nblocks),
            );

            let batches = keyspace.partition(
                self.get_shard_identity(),
                Timeline::MAX_GET_VECTORED_KEYS * BLCKSZ as u64,
            );

            let io_concurrency = IoConcurrency::spawn_from_conf(
                self.conf,
                self.gate
                    .enter()
                    .map_err(|_| PageReconstructError::Cancelled)?,
            );

            for batch in batches.parts.into_iter().rev() {
                let query = VersionedKeySpaceQuery::uniform(batch, probe_lsn);
                let blocks = self
                    .get_vectored(query, io_concurrency.clone(), ctx)
                    .await?;

                for (_key, clog_page) in blocks.into_iter().rev() {
                    let clog_page = clog_page?;

                    if clog_page.len() == BLCKSZ as usize + 8 {
                        let mut timestamp_bytes = [0u8; 8];
                        timestamp_bytes.copy_from_slice(&clog_page[BLCKSZ as usize..]);
                        let timestamp = TimestampTz::from_be_bytes(timestamp_bytes);

                        match f(timestamp) {
                            ControlFlow::Break(b) => return Ok(b),
                            ControlFlow::Continue(()) => (),
                        }
                    }
                }
            }
        }
        Ok(Default::default())
    }

    pub(crate) async fn get_slru_keyspace(
        &self,
        version: Version<'_>,
        ctx: &RequestContext,
    ) -> Result<KeySpace, PageReconstructError> {
        let mut accum = KeySpaceAccum::new();

        for kind in SlruKind::iter() {
            let mut segments: Vec<u32> = self
                .list_slru_segments(kind, version, ctx)
                .await?
                .into_iter()
                .collect();
            segments.sort_unstable();

            for seg in segments {
                let block_count = self.get_slru_segment_size(kind, seg, version, ctx).await?;

                accum.add_range(
                    slru_block_to_key(kind, seg, 0)..slru_block_to_key(kind, seg, block_count),
                );
            }
        }

        Ok(accum.to_keyspace())
    }

    /// Get a list of SLRU segments
    pub(crate) async fn list_slru_segments(
        &self,
        kind: SlruKind,
        version: Version<'_>,
        ctx: &RequestContext,
    ) -> Result<HashSet<u32>, PageReconstructError> {
        // fetch directory entry
        let key = slru_dir_to_key(kind);

        let buf = version.get(self, key, ctx).await?;
        Ok(SlruSegmentDirectory::des(&buf)?.segments)
    }

    pub(crate) async fn get_relmap_file(
        &self,
        spcnode: Oid,
        dbnode: Oid,
        version: Version<'_>,
        ctx: &RequestContext,
    ) -> Result<Bytes, PageReconstructError> {
        let key = relmap_file_key(spcnode, dbnode);

        let buf = version.get(self, key, ctx).await?;
        Ok(buf)
    }

    pub(crate) async fn list_dbdirs(
        &self,
        lsn: Lsn,
        ctx: &RequestContext,
    ) -> Result<HashMap<(Oid, Oid), bool>, PageReconstructError> {
        // fetch directory entry
        let buf = self.get(DBDIR_KEY, lsn, ctx).await?;

        Ok(DbDirectory::des(&buf)?.dbdirs)
    }

    pub(crate) async fn get_twophase_file(
        &self,
        xid: u64,
        lsn: Lsn,
        ctx: &RequestContext,
    ) -> Result<Bytes, PageReconstructError> {
        let key = twophase_file_key(xid);
        let buf = self.get(key, lsn, ctx).await?;
        Ok(buf)
    }

    pub(crate) async fn list_twophase_files(
        &self,
        lsn: Lsn,
        ctx: &RequestContext,
    ) -> Result<HashSet<u64>, PageReconstructError> {
        // fetch directory entry
        let buf = self.get(TWOPHASEDIR_KEY, lsn, ctx).await?;

        if self.pg_version >= 17 {
            Ok(TwoPhaseDirectoryV17::des(&buf)?.xids)
        } else {
            Ok(TwoPhaseDirectory::des(&buf)?
                .xids
                .iter()
                .map(|x| u64::from(*x))
                .collect())
        }
    }

    pub(crate) async fn get_control_file(
        &self,
        lsn: Lsn,
        ctx: &RequestContext,
    ) -> Result<Bytes, PageReconstructError> {
        self.get(CONTROLFILE_KEY, lsn, ctx).await
    }

    pub(crate) async fn get_checkpoint(
        &self,
        lsn: Lsn,
        ctx: &RequestContext,
    ) -> Result<Bytes, PageReconstructError> {
        self.get(CHECKPOINT_KEY, lsn, ctx).await
    }

    async fn list_aux_files_v2(
        &self,
        lsn: Lsn,
        ctx: &RequestContext,
        io_concurrency: IoConcurrency,
    ) -> Result<HashMap<String, Bytes>, PageReconstructError> {
        let kv = self
            .scan(
                KeySpace::single(Key::metadata_aux_key_range()),
                lsn,
                ctx,
                io_concurrency,
            )
            .await?;
        let mut result = HashMap::new();
        let mut sz = 0;
        for (_, v) in kv {
            let v = v?;
            let v = aux_file::decode_file_value_bytes(&v)
                .context("value decode")
                .map_err(PageReconstructError::Other)?;
            for (fname, content) in v {
                sz += fname.len();
                sz += content.len();
                result.insert(fname, content);
            }
        }
        self.aux_file_size_estimator.on_initial(sz);
        Ok(result)
    }

    pub(crate) async fn trigger_aux_file_size_computation(
        &self,
        lsn: Lsn,
        ctx: &RequestContext,
        io_concurrency: IoConcurrency,
    ) -> Result<(), PageReconstructError> {
        self.list_aux_files_v2(lsn, ctx, io_concurrency).await?;
        Ok(())
    }

    pub(crate) async fn list_aux_files(
        &self,
        lsn: Lsn,
        ctx: &RequestContext,
        io_concurrency: IoConcurrency,
    ) -> Result<HashMap<String, Bytes>, PageReconstructError> {
        self.list_aux_files_v2(lsn, ctx, io_concurrency).await
    }

    pub(crate) async fn get_replorigins(
        &self,
        lsn: Lsn,
        ctx: &RequestContext,
        io_concurrency: IoConcurrency,
    ) -> Result<HashMap<RepOriginId, Lsn>, PageReconstructError> {
        let kv = self
            .scan(
                KeySpace::single(repl_origin_key_range()),
                lsn,
                ctx,
                io_concurrency,
            )
            .await?;
        let mut result = HashMap::new();
        for (k, v) in kv {
            let v = v?;
            let origin_id = k.field6 as RepOriginId;
            let origin_lsn = Lsn::des(&v).unwrap();
            if origin_lsn != Lsn::INVALID {
                result.insert(origin_id, origin_lsn);
            }
        }
        Ok(result)
    }

    /// Does the same as get_current_logical_size but counted on demand.
    /// Used to initialize the logical size tracking on startup.
    ///
    /// Only relation blocks are counted currently. That excludes metadata,
    /// SLRUs, twophase files etc.
    ///
    /// # Cancel-Safety
    ///
    /// This method is cancellation-safe.
    pub(crate) async fn get_current_logical_size_non_incremental(
        &self,
        lsn: Lsn,
        ctx: &RequestContext,
    ) -> Result<u64, CalculateLogicalSizeError> {
        debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id();

        fail::fail_point!("skip-logical-size-calculation", |_| { Ok(0) });

        // Fetch list of database dirs and iterate them
        let buf = self.get(DBDIR_KEY, lsn, ctx).await?;
        let dbdir = DbDirectory::des(&buf)?;

        let mut total_size: u64 = 0;
        for (spcnode, dbnode) in dbdir.dbdirs.keys() {
            for rel in self
                .list_rels(*spcnode, *dbnode, Version::Lsn(lsn), ctx)
                .await?
            {
                if self.cancel.is_cancelled() {
                    return Err(CalculateLogicalSizeError::Cancelled);
                }
                let relsize_key = rel_size_to_key(rel);
                let mut buf = self.get(relsize_key, lsn, ctx).await?;
                let relsize = buf.get_u32_le();

                total_size += relsize as u64;
            }
        }
        Ok(total_size * BLCKSZ as u64)
    }

    /// Get a KeySpace that covers all the Keys that are in use at AND below the given LSN. This is only used
    /// for gc-compaction.
    ///
    /// gc-compaction cannot use the same `collect_keyspace` function as the legacy compaction because it
    /// processes data at multiple LSNs and needs to be aware of the fact that some key ranges might need to
    /// be kept only for a specific range of LSN.
    ///
    /// Consider the case that the user created branches at LSN 10 and 20, where the user created a table A at
    /// LSN 10 and dropped that table at LSN 20. `collect_keyspace` at LSN 10 will return the key range
    /// corresponding to that table, while LSN 20 won't. The keyspace info at a single LSN is not enough to
    /// determine which keys to retain/drop for gc-compaction.
    ///
    /// For now, it only drops AUX-v1 keys. But in the future, the function will be extended to return the keyspace
    /// to be retained for each of the branch LSNs.
    ///
    /// The return value is (dense keyspace, sparse keyspace).
    pub(crate) async fn collect_gc_compaction_keyspace(
        &self,
    ) -> Result<(KeySpace, SparseKeySpace), CollectKeySpaceError> {
        let metadata_key_begin = Key::metadata_key_range().start;
        let aux_v1_key = AUX_FILES_KEY;
        let dense_keyspace = KeySpace {
            ranges: vec![Key::MIN..aux_v1_key, aux_v1_key.next()..metadata_key_begin],
        };
        Ok((
            dense_keyspace,
            SparseKeySpace(KeySpace::single(Key::metadata_key_range())),
        ))
    }

    ///
    /// Get a KeySpace that covers all the Keys that are in use at the given LSN.
    /// Anything that's not listed may be removed from the underlying storage (from
    /// that LSN forwards).
    ///
    /// The return value is (dense keyspace, sparse keyspace).
    pub(crate) async fn collect_keyspace(
        &self,
        lsn: Lsn,
        ctx: &RequestContext,
    ) -> Result<(KeySpace, SparseKeySpace), CollectKeySpaceError> {
        // Iterate through key ranges, greedily packing them into partitions
        let mut result = KeySpaceAccum::new();

        // The dbdir metadata always exists
        result.add_key(DBDIR_KEY);

        // Fetch list of database dirs and iterate them
        let dbdir = self.list_dbdirs(lsn, ctx).await?;
        let mut dbs: Vec<((Oid, Oid), bool)> = dbdir.into_iter().collect();

        dbs.sort_unstable_by(|(k_a, _), (k_b, _)| k_a.cmp(k_b));
        for ((spcnode, dbnode), has_relmap_file) in dbs {
            if has_relmap_file {
                result.add_key(relmap_file_key(spcnode, dbnode));
            }
            result.add_key(rel_dir_to_key(spcnode, dbnode));

            let mut rels: Vec<RelTag> = self
                .list_rels(spcnode, dbnode, Version::Lsn(lsn), ctx)
                .await?
                .into_iter()
                .collect();
            rels.sort_unstable();
            for rel in rels {
                let relsize_key = rel_size_to_key(rel);
                let mut buf = self.get(relsize_key, lsn, ctx).await?;
                let relsize = buf.get_u32_le();

                result.add_range(rel_block_to_key(rel, 0)..rel_block_to_key(rel, relsize));
                result.add_key(relsize_key);
            }
        }

        // Iterate SLRUs next
        if self.tenant_shard_id.is_shard_zero() {
            for kind in [
                SlruKind::Clog,
                SlruKind::MultiXactMembers,
                SlruKind::MultiXactOffsets,
            ] {
                let slrudir_key = slru_dir_to_key(kind);
                result.add_key(slrudir_key);
                let buf = self.get(slrudir_key, lsn, ctx).await?;
                let dir = SlruSegmentDirectory::des(&buf)?;
                let mut segments: Vec<u32> = dir.segments.iter().cloned().collect();
                segments.sort_unstable();
                for segno in segments {
                    let segsize_key = slru_segment_size_to_key(kind, segno);
                    let mut buf = self.get(segsize_key, lsn, ctx).await?;
                    let segsize = buf.get_u32_le();

                    result.add_range(
                        slru_block_to_key(kind, segno, 0)..slru_block_to_key(kind, segno, segsize),
                    );
                    result.add_key(segsize_key);
                }
            }
        }

        // Then pg_twophase
        result.add_key(TWOPHASEDIR_KEY);

        let mut xids: Vec<u64> = self
            .list_twophase_files(lsn, ctx)
            .await?
            .iter()
            .cloned()
            .collect();
        xids.sort_unstable();
        for xid in xids {
            result.add_key(twophase_file_key(xid));
        }

        result.add_key(CONTROLFILE_KEY);
        result.add_key(CHECKPOINT_KEY);

        // Add extra keyspaces in the test cases. Some test cases write keys into the storage without
        // creating directory keys. These test cases will add such keyspaces into `extra_test_dense_keyspace`
        // and the keys will not be garbage-collected.
        #[cfg(test)]
        {
            let guard = self.extra_test_dense_keyspace.load();
            for kr in &guard.ranges {
                result.add_range(kr.clone());
            }
        }

        let dense_keyspace = result.to_keyspace();
        let sparse_keyspace = SparseKeySpace(KeySpace {
            ranges: vec![
                Key::metadata_aux_key_range(),
                repl_origin_key_range(),
                Key::rel_dir_sparse_key_range(),
            ],
        });

        if cfg!(debug_assertions) {
            // Verify that the sparse keyspaces are ordered and non-overlapping.

            // We do not use KeySpaceAccum for sparse_keyspace because we want to ensure each
            // category of sparse keys are split into their own image/delta files. If there
            // are overlapping keyspaces, they will be automatically merged by keyspace accum,
            // and we want the developer to keep the keyspaces separated.

            let ranges = &sparse_keyspace.0.ranges;

            // TODO: use a single overlaps_with across the codebase
            fn overlaps_with<T: Ord>(a: &Range<T>, b: &Range<T>) -> bool {
                !(a.end <= b.start || b.end <= a.start)
            }
            for i in 0..ranges.len() {
                for j in 0..i {
                    if overlaps_with(&ranges[i], &ranges[j]) {
                        panic!(
                            "overlapping sparse keyspace: {}..{} and {}..{}",
                            ranges[i].start, ranges[i].end, ranges[j].start, ranges[j].end
                        );
                    }
                }
            }
            for i in 1..ranges.len() {
                assert!(
                    ranges[i - 1].end <= ranges[i].start,
                    "unordered sparse keyspace: {}..{} and {}..{}",
                    ranges[i - 1].start,
                    ranges[i - 1].end,
                    ranges[i].start,
                    ranges[i].end
                );
            }
        }

        Ok((dense_keyspace, sparse_keyspace))
    }

    /// Get the cached size of a relation, if it was not updated after the specified LSN.
    pub fn get_cached_rel_size(&self, tag: &RelTag, lsn: Lsn) -> Option<BlockNumber> {
        let rel_size_cache = self.rel_size_cache.read().unwrap();
        if let Some((cached_lsn, nblocks)) = rel_size_cache.map.get(tag) {
            if lsn >= *cached_lsn {
                RELSIZE_CACHE_HITS.inc();
                return Some(*nblocks);
            }
            RELSIZE_CACHE_MISSES_OLD.inc();
        }
        RELSIZE_CACHE_MISSES.inc();
        None
    }

    /// Update cached relation size if there is no more recent update
    pub fn update_cached_rel_size(&self, tag: RelTag, lsn: Lsn, nblocks: BlockNumber) {
        let mut rel_size_cache = self.rel_size_cache.write().unwrap();

        if lsn < rel_size_cache.complete_as_of {
            // Do not cache old values. It's safe to cache the size on read, as long as
            // the read was at an LSN since we started the WAL ingestion. Reasoning: we
            // never evict values from the cache, so if the relation size changed after
            // 'lsn', the new value is already in the cache.
            return;
        }

        match rel_size_cache.map.entry(tag) {
            hash_map::Entry::Occupied(mut entry) => {
                let cached_lsn = entry.get_mut();
                if lsn >= cached_lsn.0 {
                    *cached_lsn = (lsn, nblocks);
                }
            }
            hash_map::Entry::Vacant(entry) => {
                entry.insert((lsn, nblocks));
                RELSIZE_CACHE_ENTRIES.inc();
            }
        }
    }

    /// Store cached relation size
    pub fn set_cached_rel_size(&self, tag: RelTag, lsn: Lsn, nblocks: BlockNumber) {
        let mut rel_size_cache = self.rel_size_cache.write().unwrap();
        if rel_size_cache.map.insert(tag, (lsn, nblocks)).is_none() {
            RELSIZE_CACHE_ENTRIES.inc();
        }
    }

    /// Remove cached relation size
    pub fn remove_cached_rel_size(&self, tag: &RelTag) {
        let mut rel_size_cache = self.rel_size_cache.write().unwrap();
        if rel_size_cache.map.remove(tag).is_some() {
            RELSIZE_CACHE_ENTRIES.dec();
        }
    }
}

/// DatadirModification represents an operation to ingest an atomic set of
/// updates to the repository.
///
/// It is created by the `begin_modification` function. One is used for each WAL
/// record, so that all the modifications made by a single WAL record appear atomic.
pub struct DatadirModification<'a> {
    /// The timeline this modification applies to. You can access this to
    /// read the state, but note that any pending updates are *not* reflected
    /// in the state in 'tline' yet.
    pub tline: &'a Timeline,

    /// Current LSN of the modification
    lsn: Lsn,

    // The modifications are not applied directly to the underlying key-value store.
    // The put-functions add the modifications here, and they are flushed to the
    // underlying key-value store by the 'finish' function.
    pending_lsns: Vec<Lsn>,
    pending_deletions: Vec<(Range<Key>, Lsn)>,
    pending_nblocks: i64,

    /// Metadata writes, indexed by key so that they can be read from not-yet-committed modifications
    /// while ingesting subsequent records. See [`Self::is_data_key`] for the definition of 'metadata'.
    pending_metadata_pages: HashMap<CompactKey, Vec<(Lsn, usize, Value)>>,

    /// Data writes, ready to be flushed into an ephemeral layer. See [`Self::is_data_key`] for
    /// which keys are stored here.
    pending_data_batch: Option<SerializedValueBatch>,

    /// For special "directory" keys that store key-value maps, track the size of the map
    /// if it was updated in this modification.
    pending_directory_entries: Vec<(DirectoryKind, MetricsUpdate)>,

    /// An **approximation** of how many metadata bytes will be written to the EphemeralFile.
    pending_metadata_bytes: usize,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MetricsUpdate {
    /// Set the metrics to this value
    Set(u64),
    /// Increment the metrics by this value
    Add(u64),
    /// Decrement the metrics by this value
    Sub(u64),
}

impl DatadirModification<'_> {
    // When a DatadirModification is committed, we do a monolithic serialization of all its contents. WAL records can
    // contain multiple pages, so the pageserver's record-based batch size isn't sufficient to bound this allocation: we
    // additionally specify a limit on how much payload a DatadirModification may contain before it should be committed.
    pub(crate) const MAX_PENDING_BYTES: usize = 8 * 1024 * 1024;
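    // A hedged sketch of the intended use (hypothetical ingestion loop, not code
    // from this file): between records, callers are expected to check
    //
    //     if modification.approx_pending_bytes() > DatadirModification::MAX_PENDING_BYTES {
    //         modification.commit(&ctx).await?;
    //     }
    //
    // so a single commit never has to serialize much more than ~8 MiB of payload.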

    /// Get the current LSN
    pub(crate) fn get_lsn(&self) -> Lsn {
        self.lsn
    }

    pub(crate) fn approx_pending_bytes(&self) -> usize {
        self.pending_data_batch
            .as_ref()
            .map_or(0, |b| b.buffer_size())
            + self.pending_metadata_bytes
    }

    pub(crate) fn has_dirty_data(&self) -> bool {
        self.pending_data_batch
            .as_ref()
            .is_some_and(|b| b.has_data())
    }

    /// Returns statistics about the currently pending modifications.
    pub(crate) fn stats(&self) -> DatadirModificationStats {
        let mut stats = DatadirModificationStats::default();
        for (_, _, value) in self.pending_metadata_pages.values().flatten() {
            match value {
                Value::Image(_) => stats.metadata_images += 1,
                Value::WalRecord(r) if r.will_init() => stats.metadata_images += 1,
                Value::WalRecord(_) => stats.metadata_deltas += 1,
            }
        }
        for valuemeta in self.pending_data_batch.iter().flat_map(|b| &b.metadata) {
            match valuemeta {
                ValueMeta::Serialized(s) if s.will_init => stats.data_images += 1,
                ValueMeta::Serialized(_) => stats.data_deltas += 1,
                ValueMeta::Observed(_) => {}
            }
        }
        stats
    }

    /// Set the current LSN
    pub(crate) fn set_lsn(&mut self, lsn: Lsn) -> Result<(), WalIngestError> {
        ensure_walingest!(
            lsn >= self.lsn,
            "setting an older lsn {} than {} is not allowed",
            lsn,
            self.lsn
        );

        if lsn > self.lsn {
            self.pending_lsns.push(self.lsn);
            self.lsn = lsn;
        }
        Ok(())
    }

    /// In this context, 'metadata' means keys that are only read by the pageserver internally, and 'data' means
    /// keys that represent literal blocks that postgres can read. So data includes relation blocks and
    /// SLRU blocks, which are read directly by postgres, and everything else is considered metadata.
    ///
    /// The distinction is important because data keys are handled on a fast path where dirty writes are
    /// not readable until this modification is committed, whereas metadata keys are visible for read
    /// via [`Self::get`] as soon as their record has been ingested.
1483 : /// via [`Self::get`] as soon as their record has been ingested.
1484 5104164 : fn is_data_key(key: &Key) -> bool {
1485 5104164 : key.is_rel_block_key() || key.is_slru_block_key()
1486 5104164 : }
1487 :
1488 : /// Initialize a completely new repository.
1489 : ///
1490 : /// This inserts the directory metadata entries that are assumed to
1491 : /// always exist.
1492 1308 : pub fn init_empty(&mut self) -> anyhow::Result<()> {
1493 1308 : let buf = DbDirectory::ser(&DbDirectory {
1494 1308 : dbdirs: HashMap::new(),
1495 1308 : })?;
1496 1308 : self.pending_directory_entries
1497 1308 : .push((DirectoryKind::Db, MetricsUpdate::Set(0)));
1498 1308 : self.put(DBDIR_KEY, Value::Image(buf.into()));
1499 :
1500 1308 : let buf = if self.tline.pg_version >= 17 {
1501 1152 : TwoPhaseDirectoryV17::ser(&TwoPhaseDirectoryV17 {
1502 1152 : xids: HashSet::new(),
1503 1152 : })
1504 : } else {
1505 156 : TwoPhaseDirectory::ser(&TwoPhaseDirectory {
1506 156 : xids: HashSet::new(),
1507 156 : })
1508 0 : }?;
1509 1308 : self.pending_directory_entries
1510 1308 : .push((DirectoryKind::TwoPhase, MetricsUpdate::Set(0)));
1511 1308 : self.put(TWOPHASEDIR_KEY, Value::Image(buf.into()));
1512 :
1513 1308 : let buf: Bytes = SlruSegmentDirectory::ser(&SlruSegmentDirectory::default())?.into();
1514 1308 : let empty_dir = Value::Image(buf);
1515 1308 :
1516 1308 : // Initialize SLRUs on shard 0 only: creating these on other shards would be
1517 1308 : // harmless but they'd just be dropped on later compaction.
1518 1308 : if self.tline.tenant_shard_id.is_shard_zero() {
1519 1272 : self.put(slru_dir_to_key(SlruKind::Clog), empty_dir.clone());
1520 1272 : self.pending_directory_entries.push((
1521 1272 : DirectoryKind::SlruSegment(SlruKind::Clog),
1522 1272 : MetricsUpdate::Set(0),
1523 1272 : ));
1524 1272 : self.put(
1525 1272 : slru_dir_to_key(SlruKind::MultiXactMembers),
1526 1272 : empty_dir.clone(),
1527 1272 : );
1528 1272 : self.pending_directory_entries.push((
1529 1272 : DirectoryKind::SlruSegment(SlruKind::MultiXactMembers),
1530 1272 : MetricsUpdate::Set(0),
1531 1272 : ));
1532 1272 : self.put(slru_dir_to_key(SlruKind::MultiXactOffsets), empty_dir);
1533 1272 : self.pending_directory_entries.push((
1534 1272 : DirectoryKind::SlruSegment(SlruKind::MultiXactOffsets),
1535 1272 : MetricsUpdate::Set(0),
1536 1272 : ));
1537 1272 : }
1538 :
1539 1308 : Ok(())
1540 1308 : }
1541 :
1542 : #[cfg(test)]
1543 1296 : pub fn init_empty_test_timeline(&mut self) -> anyhow::Result<()> {
1544 1296 : self.init_empty()?;
1545 1296 : self.put_control_file(bytes::Bytes::from_static(
1546 1296 : b"control_file contents do not matter",
1547 1296 : ))
1548 1296 : .context("put_control_file")?;
1549 1296 : self.put_checkpoint(bytes::Bytes::from_static(
1550 1296 : b"checkpoint_file contents do not matter",
1551 1296 : ))
1552 1296 : .context("put_checkpoint_file")?;
1553 1296 : Ok(())
1554 1296 : }
1555 :
1556 : /// Creates a relation if it is not already present.
1557 : /// Returns the current size of the relation
1558 2508336 : pub(crate) async fn create_relation_if_required(
1559 2508336 : &mut self,
1560 2508336 : rel: RelTag,
1561 2508336 : ctx: &RequestContext,
1562 2508336 : ) -> Result<u32, WalIngestError> {
1563 : // Get current size and put rel creation if rel doesn't exist
1564 : //
1565 : // NOTE: we check the cache first even though get_rel_exists and get_rel_size would
1566 : // check the cache too. This is because eagerly checking the cache results in
1567 : // less work overall and 10% better performance. It's more work on cache miss
1568 : // but cache miss is rare.
1569 2508336 : if let Some(nblocks) = self.tline.get_cached_rel_size(&rel, self.get_lsn()) {
1570 2508276 : Ok(nblocks)
1571 60 : } else if !self
1572 60 : .tline
1573 60 : .get_rel_exists(rel, Version::Modified(self), ctx)
1574 60 : .await?
1575 : {
1576 : // create it with 0 size initially, the logic below will extend it
1577 60 : self.put_rel_creation(rel, 0, ctx).await?;
1578 60 : Ok(0)
1579 : } else {
1580 0 : Ok(self
1581 0 : .tline
1582 0 : .get_rel_size(rel, Version::Modified(self), ctx)
1583 0 : .await?)
1584 : }
1585 2508336 : }
1586 :
1587 : /// Given a block number for a relation (which represents a newly written block),
1588 : /// the previous block count of the relation, and the shard info, find the gaps
1589 : /// that were created by the newly written block if any.
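     : ///
     : /// Sketch: with `previous_nblocks == 3` and `blkno == 5`, blocks 3 and 4 were
     : /// skipped, so the returned keyspace covers those two blocks, restricted to the
     : /// blocks owned by this shard:
     : ///
     : /// ```ignore
     : /// let gaps = Self::find_gaps(rel, 5, 3, &shard);
     : /// ```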
1590 874020 : fn find_gaps(
1591 874020 : rel: RelTag,
1592 874020 : blkno: u32,
1593 874020 : previous_nblocks: u32,
1594 874020 : shard: &ShardIdentity,
1595 874020 : ) -> Option<KeySpace> {
1596 874020 : let mut key = rel_block_to_key(rel, blkno);
1597 874020 : let mut gap_accum = None;
1598 :
1599 874020 : for gap_blkno in previous_nblocks..blkno {
1600 192 : key.field6 = gap_blkno;
1601 192 :
1602 192 : if shard.get_shard_number(&key) != shard.number {
1603 48 : continue;
1604 144 : }
1605 144 :
1606 144 : gap_accum
1607 144 : .get_or_insert_with(KeySpaceAccum::new)
1608 144 : .add_key(key);
1609 : }
1610 :
1611 874020 : gap_accum.map(|accum| accum.to_keyspace())
1612 874020 : }
1613 :
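     : /// Ingest a decoded WAL batch: for each data page in the batch this creates the
     : /// relation if needed, extends it to cover the new block, zero-fills any gap
     : /// blocks owned by this shard, and then merges the batch into the pending data
     : /// batch to be written out on commit.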
1614 875112 : pub async fn ingest_batch(
1615 875112 : &mut self,
1616 875112 : mut batch: SerializedValueBatch,
1617 875112 : // TODO(vlad): remove this argument and replace the shard check with is_key_local
1618 875112 : shard: &ShardIdentity,
1619 875112 : ctx: &RequestContext,
1620 875112 : ) -> Result<(), WalIngestError> {
1621 875112 : let mut gaps_at_lsns = Vec::default();
1622 :
1623 875112 : for meta in batch.metadata.iter() {
1624 873852 : let key = Key::from_compact(meta.key());
1625 873852 : let (rel, blkno) = key
1626 873852 : .to_rel_block()
1627 873852 : .map_err(|_| WalIngestErrorKind::InvalidKey(key, meta.lsn()))?;
1628 873852 : let new_nblocks = blkno + 1;
1629 :
1630 873852 : let old_nblocks = self.create_relation_if_required(rel, ctx).await?;
1631 873852 : if new_nblocks > old_nblocks {
1632 14340 : self.put_rel_extend(rel, new_nblocks, ctx).await?;
1633 859512 : }
1634 :
1635 873852 : if let Some(gaps) = Self::find_gaps(rel, blkno, old_nblocks, shard) {
1636 0 : gaps_at_lsns.push((gaps, meta.lsn()));
1637 873852 : }
1638 : }
1639 :
1640 875112 : if !gaps_at_lsns.is_empty() {
1641 0 : batch.zero_gaps(gaps_at_lsns);
1642 875112 : }
1643 :
1644 875112 : match self.pending_data_batch.as_mut() {
1645 120 : Some(pending_batch) => {
1646 120 : pending_batch.extend(batch);
1647 120 : }
1648 874992 : None if batch.has_data() => {
1649 873780 : self.pending_data_batch = Some(batch);
1650 873780 : }
1651 1212 : None => {
1652 1212 : // Nothing to initialize the batch with
1653 1212 : }
1654 : }
1655 :
1656 875112 : Ok(())
1657 875112 : }
1658 :
1659 : /// Put a new page version that can be constructed from a WAL record
1660 : ///
1661 : /// NOTE: this will *not* implicitly extend the relation, if the page is beyond the
1662 : /// current end-of-file. It's up to the caller to check that the relation size
1663 : /// matches the blocks inserted!
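     : ///
     : /// Hedged usage sketch (the caller keeps the relation size in sync, e.g. via
     : /// [`Self::put_rel_extend`]):
     : ///
     : /// ```ignore
     : /// modification.put_rel_extend(rel, blknum + 1, ctx).await?;
     : /// modification.put_rel_wal_record(rel, blknum, rec)?;
     : /// ```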
1664 72 : pub fn put_rel_wal_record(
1665 72 : &mut self,
1666 72 : rel: RelTag,
1667 72 : blknum: BlockNumber,
1668 72 : rec: NeonWalRecord,
1669 72 : ) -> Result<(), WalIngestError> {
1670 72 : ensure_walingest!(rel.relnode != 0, RelationError::InvalidRelnode);
1671 72 : self.put(rel_block_to_key(rel, blknum), Value::WalRecord(rec));
1672 72 : Ok(())
1673 72 : }
1674 :
1675 : // Same, but for an SLRU.
1676 48 : pub fn put_slru_wal_record(
1677 48 : &mut self,
1678 48 : kind: SlruKind,
1679 48 : segno: u32,
1680 48 : blknum: BlockNumber,
1681 48 : rec: NeonWalRecord,
1682 48 : ) -> Result<(), WalIngestError> {
1683 48 : if !self.tline.tenant_shard_id.is_shard_zero() {
1684 0 : return Ok(());
1685 48 : }
1686 48 :
1687 48 : self.put(
1688 48 : slru_block_to_key(kind, segno, blknum),
1689 48 : Value::WalRecord(rec),
1690 48 : );
1691 48 : Ok(())
1692 48 : }
1693 :
1694 : /// Like [`Self::put_rel_wal_record`], but with a ready-made image of the page.
1695 1667052 : pub fn put_rel_page_image(
1696 1667052 : &mut self,
1697 1667052 : rel: RelTag,
1698 1667052 : blknum: BlockNumber,
1699 1667052 : img: Bytes,
1700 1667052 : ) -> Result<(), WalIngestError> {
1701 1667052 : ensure_walingest!(rel.relnode != 0, RelationError::InvalidRelnode);
1702 1667052 : let key = rel_block_to_key(rel, blknum);
1703 1667052 : if !key.is_valid_key_on_write_path() {
1704 0 : Err(WalIngestErrorKind::InvalidKey(key, self.lsn))?;
1705 1667052 : }
1706 1667052 : self.put(rel_block_to_key(rel, blknum), Value::Image(img));
1707 1667052 : Ok(())
1708 1667052 : }
1709 :
1710 36 : pub fn put_slru_page_image(
1711 36 : &mut self,
1712 36 : kind: SlruKind,
1713 36 : segno: u32,
1714 36 : blknum: BlockNumber,
1715 36 : img: Bytes,
1716 36 : ) -> Result<(), WalIngestError> {
1717 36 : assert!(self.tline.tenant_shard_id.is_shard_zero());
1718 :
1719 36 : let key = slru_block_to_key(kind, segno, blknum);
1720 36 : if !key.is_valid_key_on_write_path() {
1721 0 : Err(WalIngestErrorKind::InvalidKey(key, self.lsn))?;
1722 36 : }
1723 36 : self.put(key, Value::Image(img));
1724 36 : Ok(())
1725 36 : }
1726 :
1727 17988 : pub(crate) fn put_rel_page_image_zero(
1728 17988 : &mut self,
1729 17988 : rel: RelTag,
1730 17988 : blknum: BlockNumber,
1731 17988 : ) -> Result<(), WalIngestError> {
1732 17988 : ensure_walingest!(rel.relnode != 0, RelationError::InvalidRelnode);
1733 17988 : let key = rel_block_to_key(rel, blknum);
1734 17988 : if !key.is_valid_key_on_write_path() {
1735 0 : Err(WalIngestErrorKind::InvalidKey(key, self.lsn))?;
1736 17988 : }
1737 :
1738 17988 : let batch = self
1739 17988 : .pending_data_batch
1740 17988 : .get_or_insert_with(SerializedValueBatch::default);
1741 17988 :
1742 17988 : batch.put(key.to_compact(), Value::Image(ZERO_PAGE.clone()), self.lsn);
1743 17988 :
1744 17988 : Ok(())
1745 17988 : }
1746 :
1747 0 : pub(crate) fn put_slru_page_image_zero(
1748 0 : &mut self,
1749 0 : kind: SlruKind,
1750 0 : segno: u32,
1751 0 : blknum: BlockNumber,
1752 0 : ) -> Result<(), WalIngestError> {
1753 0 : assert!(self.tline.tenant_shard_id.is_shard_zero());
1754 0 : let key = slru_block_to_key(kind, segno, blknum);
1755 0 : if !key.is_valid_key_on_write_path() {
1756 0 : Err(WalIngestErrorKind::InvalidKey(key, self.lsn))?;
1757 0 : }
1758 :
1759 0 : let batch = self
1760 0 : .pending_data_batch
1761 0 : .get_or_insert_with(SerializedValueBatch::default);
1762 0 :
1763 0 : batch.put(key.to_compact(), Value::Image(ZERO_PAGE.clone()), self.lsn);
1764 0 :
1765 0 : Ok(())
1766 0 : }
1767 :
1768 : /// Returns `true` if the rel_size_v2 write path is enabled. If it is the first time that
1769 : /// we enable it, we also need to persist it in `index_part.json`.
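     : ///
     : /// Decision table (config = tenant setting, status = persisted migration state):
     : ///
     : /// | config | status             | result                          |
     : /// |--------|--------------------|---------------------------------|
     : /// | false  | Legacy             | v2 disabled                     |
     : /// | false  | Migrating/Migrated | v2 enabled (already persisted)  |
     : /// | true   | Legacy             | enable now, persist `Migrating` |
     : /// | true   | Migrating/Migrated | v2 enabled                      |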
1770 11676 : pub fn maybe_enable_rel_size_v2(&mut self) -> anyhow::Result<bool> {
1771 11676 : let status = self.tline.get_rel_size_v2_status();
1772 11676 : let config = self.tline.get_rel_size_v2_enabled();
1773 11676 : match (config, status) {
1774 : (false, RelSizeMigration::Legacy) => {
1775 : // tenant config didn't enable it and we didn't write any reldir_v2 key yet
1776 11676 : Ok(false)
1777 : }
1778 : (false, RelSizeMigration::Migrating | RelSizeMigration::Migrated) => {
1779 : // index_part already persisted that the timeline has enabled rel_size_v2
1780 0 : Ok(true)
1781 : }
1782 : (true, RelSizeMigration::Legacy) => {
1783 : // The first time we enable it, we need to persist it in `index_part.json`
1784 0 : self.tline
1785 0 : .update_rel_size_v2_status(RelSizeMigration::Migrating)?;
1786 0 : tracing::info!("enabled rel_size_v2");
1787 0 : Ok(true)
1788 : }
1789 : (true, RelSizeMigration::Migrating | RelSizeMigration::Migrated) => {
1790 : // index_part already persisted that the timeline has enabled rel_size_v2
1791 : // and we don't need to do anything
1792 0 : Ok(true)
1793 : }
1794 : }
1795 11676 : }
1796 :
1797 : /// Store a relmapper file (pg_filenode.map) in the repository
1798 96 : pub async fn put_relmap_file(
1799 96 : &mut self,
1800 96 : spcnode: Oid,
1801 96 : dbnode: Oid,
1802 96 : img: Bytes,
1803 96 : ctx: &RequestContext,
1804 96 : ) -> Result<(), WalIngestError> {
1805 96 : let v2_enabled = self
1806 96 : .maybe_enable_rel_size_v2()
1807 96 : .map_err(WalIngestErrorKind::MaybeRelSizeV2Error)?;
1808 :
1809 : // Add it to the directory (if it doesn't exist already)
1810 96 : let buf = self.get(DBDIR_KEY, ctx).await?;
1811 96 : let mut dbdir = DbDirectory::des(&buf)?;
1812 :
1813 96 : let r = dbdir.dbdirs.insert((spcnode, dbnode), true);
1814 96 : if r.is_none() || r == Some(false) {
1815 : // The dbdir entry didn't exist, or it contained a
1816 : // 'false'. The 'insert' call already updated it with
1817 : // 'true', now write the updated 'dbdirs' map back.
1818 96 : let buf = DbDirectory::ser(&dbdir)?;
1819 96 : self.put(DBDIR_KEY, Value::Image(buf.into()));
1820 0 : }
1821 96 : if r.is_none() {
1822 : // Create RelDirectory
1823 : // TODO: if we have fully migrated to v2, no need to create this directory
1824 48 : let buf = RelDirectory::ser(&RelDirectory {
1825 48 : rels: HashSet::new(),
1826 48 : })?;
1827 48 : self.pending_directory_entries
1828 48 : .push((DirectoryKind::Rel, MetricsUpdate::Set(0)));
1829 48 : if v2_enabled {
1830 0 : self.pending_directory_entries
1831 0 : .push((DirectoryKind::RelV2, MetricsUpdate::Set(0)));
1832 48 : }
1833 48 : self.put(
1834 48 : rel_dir_to_key(spcnode, dbnode),
1835 48 : Value::Image(Bytes::from(buf)),
1836 48 : );
1837 48 : }
1838 :
1839 96 : self.put(relmap_file_key(spcnode, dbnode), Value::Image(img));
1840 96 : Ok(())
1841 96 : }
1842 :
1843 0 : pub async fn put_twophase_file(
1844 0 : &mut self,
1845 0 : xid: u64,
1846 0 : img: Bytes,
1847 0 : ctx: &RequestContext,
1848 0 : ) -> Result<(), WalIngestError> {
1849 : // Add it to the directory entry
1850 0 : let dirbuf = self.get(TWOPHASEDIR_KEY, ctx).await?;
1851 0 : let newdirbuf = if self.tline.pg_version >= 17 {
1852 0 : let mut dir = TwoPhaseDirectoryV17::des(&dirbuf)?;
1853 0 : if !dir.xids.insert(xid) {
1854 0 : Err(WalIngestErrorKind::FileAlreadyExists(xid))?;
1855 0 : }
1856 0 : self.pending_directory_entries.push((
1857 0 : DirectoryKind::TwoPhase,
1858 0 : MetricsUpdate::Set(dir.xids.len() as u64),
1859 0 : ));
1860 0 : Bytes::from(TwoPhaseDirectoryV17::ser(&dir)?)
1861 : } else {
1862 0 : let xid = xid as u32;
1863 0 : let mut dir = TwoPhaseDirectory::des(&dirbuf)?;
1864 0 : if !dir.xids.insert(xid) {
1865 0 : Err(WalIngestErrorKind::FileAlreadyExists(xid.into()))?;
1866 0 : }
1867 0 : self.pending_directory_entries.push((
1868 0 : DirectoryKind::TwoPhase,
1869 0 : MetricsUpdate::Set(dir.xids.len() as u64),
1870 0 : ));
1871 0 : Bytes::from(TwoPhaseDirectory::ser(&dir)?)
1872 : };
1873 0 : self.put(TWOPHASEDIR_KEY, Value::Image(newdirbuf));
1874 0 :
1875 0 : self.put(twophase_file_key(xid), Value::Image(img));
1876 0 : Ok(())
1877 0 : }
1878 :
1879 0 : pub async fn set_replorigin(
1880 0 : &mut self,
1881 0 : origin_id: RepOriginId,
1882 0 : origin_lsn: Lsn,
1883 0 : ) -> Result<(), WalIngestError> {
1884 0 : let key = repl_origin_key(origin_id);
1885 0 : self.put(key, Value::Image(origin_lsn.ser().unwrap().into()));
1886 0 : Ok(())
1887 0 : }
1888 :
1889 0 : pub async fn drop_replorigin(&mut self, origin_id: RepOriginId) -> Result<(), WalIngestError> {
1890 0 : self.set_replorigin(origin_id, Lsn::INVALID).await
1891 0 : }
1892 :
1893 1308 : pub fn put_control_file(&mut self, img: Bytes) -> Result<(), WalIngestError> {
1894 1308 : self.put(CONTROLFILE_KEY, Value::Image(img));
1895 1308 : Ok(())
1896 1308 : }
1897 :
1898 1392 : pub fn put_checkpoint(&mut self, img: Bytes) -> Result<(), WalIngestError> {
1899 1392 : self.put(CHECKPOINT_KEY, Value::Image(img));
1900 1392 : Ok(())
1901 1392 : }
1902 :
1903 0 : pub async fn drop_dbdir(
1904 0 : &mut self,
1905 0 : spcnode: Oid,
1906 0 : dbnode: Oid,
1907 0 : ctx: &RequestContext,
1908 0 : ) -> Result<(), WalIngestError> {
1909 0 : let total_blocks = self
1910 0 : .tline
1911 0 : .get_db_size(spcnode, dbnode, Version::Modified(self), ctx)
1912 0 : .await?;
1913 :
1914 : // Remove entry from dbdir
1915 0 : let buf = self.get(DBDIR_KEY, ctx).await?;
1916 0 : let mut dir = DbDirectory::des(&buf)?;
1917 0 : if dir.dbdirs.remove(&(spcnode, dbnode)).is_some() {
1918 0 : let buf = DbDirectory::ser(&dir)?;
1919 0 : self.pending_directory_entries.push((
1920 0 : DirectoryKind::Db,
1921 0 : MetricsUpdate::Set(dir.dbdirs.len() as u64),
1922 0 : ));
1923 0 : self.put(DBDIR_KEY, Value::Image(buf.into()));
1924 : } else {
1925 0 : warn!(
1926 0 : "dropped dbdir for spcnode {} dbnode {} did not exist in db directory",
1927 : spcnode, dbnode
1928 : );
1929 : }
1930 :
1931 : // Update logical database size.
1932 0 : self.pending_nblocks -= total_blocks as i64;
1933 0 :
1934 0 : // Delete all relations and metadata files for the spcnode/dnode
1935 0 : self.delete(dbdir_key_range(spcnode, dbnode));
1936 0 : Ok(())
1937 0 : }
1938 :
1939 : /// Create a relation fork.
1940 : ///
1941 : /// 'nblocks' is the initial size.
1942 11520 : pub async fn put_rel_creation(
1943 11520 : &mut self,
1944 11520 : rel: RelTag,
1945 11520 : nblocks: BlockNumber,
1946 11520 : ctx: &RequestContext,
1947 11520 : ) -> Result<(), WalIngestError> {
1948 11520 : if rel.relnode == 0 {
1949 0 : Err(WalIngestErrorKind::LogicalError(anyhow::anyhow!(
1950 0 : "invalid relnode"
1951 0 : )))?;
1952 11520 : }
1953 : // It's possible that this is the first rel for this db in this
1954 : // tablespace. Create the reldir entry for it if so.
1955 11520 : let mut dbdir = DbDirectory::des(&self.get(DBDIR_KEY, ctx).await?)?;
1956 :
1957 11520 : let dbdir_exists =
1958 11520 : if let hash_map::Entry::Vacant(e) = dbdir.dbdirs.entry((rel.spcnode, rel.dbnode)) {
1959 : // Didn't exist. Update dbdir
1960 48 : e.insert(false);
1961 48 : let buf = DbDirectory::ser(&dbdir)?;
1962 48 : self.pending_directory_entries.push((
1963 48 : DirectoryKind::Db,
1964 48 : MetricsUpdate::Set(dbdir.dbdirs.len() as u64),
1965 48 : ));
1966 48 : self.put(DBDIR_KEY, Value::Image(buf.into()));
1967 48 : false
1968 : } else {
1969 11472 : true
1970 : };
1971 :
1972 11520 : let rel_dir_key = rel_dir_to_key(rel.spcnode, rel.dbnode);
1973 11520 : let mut rel_dir = if !dbdir_exists {
1974 : // Create the RelDirectory
1975 48 : RelDirectory::default()
1976 : } else {
1977 : // reldir already exists, fetch it
1978 11472 : RelDirectory::des(&self.get(rel_dir_key, ctx).await?)?
1979 : };
1980 :
1981 11520 : let v2_enabled = self
1982 11520 : .maybe_enable_rel_size_v2()
1983 11520 : .map_err(WalIngestErrorKind::MaybeRelSizeV2Error)?;
1984 :
1985 11520 : if v2_enabled {
1986 0 : if rel_dir.rels.contains(&(rel.relnode, rel.forknum)) {
1987 0 : Err(WalIngestErrorKind::RelationAlreadyExists(rel))?;
1988 0 : }
1989 0 : let sparse_rel_dir_key =
1990 0 : rel_tag_sparse_key(rel.spcnode, rel.dbnode, rel.relnode, rel.forknum);
1991 : // check if the rel_dir_key exists in v2
1992 0 : let val = self.sparse_get(sparse_rel_dir_key, ctx).await?;
1993 0 : let val = RelDirExists::decode_option(val)
1994 0 : .map_err(|_| WalIngestErrorKind::InvalidRelDirKey(sparse_rel_dir_key))?;
1995 0 : if val == RelDirExists::Exists {
1996 0 : Err(WalIngestErrorKind::RelationAlreadyExists(rel))?;
1997 0 : }
1998 0 : self.put(
1999 0 : sparse_rel_dir_key,
2000 0 : Value::Image(RelDirExists::Exists.encode()),
2001 0 : );
2002 0 : if !dbdir_exists {
2003 0 : self.pending_directory_entries
2004 0 : .push((DirectoryKind::Rel, MetricsUpdate::Set(0)));
2005 0 : self.pending_directory_entries
2006 0 : .push((DirectoryKind::RelV2, MetricsUpdate::Set(0)));
2007 0 : // We don't write `rel_dir_key -> rel_dir.rels` back to the storage in the v2 path unless it's the initial creation.
2008 0 : // TODO: if we have fully migrated to v2, no need to create this directory. Otherwise, there
2009 0 : // will be key not found errors if we don't create an empty one for rel_size_v2.
2010 0 : self.put(
2011 0 : rel_dir_key,
2012 0 : Value::Image(Bytes::from(RelDirectory::ser(&RelDirectory::default())?)),
2013 : );
2014 0 : }
2015 0 : self.pending_directory_entries
2016 0 : .push((DirectoryKind::RelV2, MetricsUpdate::Add(1)));
2017 : } else {
2018 : // Add the new relation to the rel directory entry, and write it back
2019 11520 : if !rel_dir.rels.insert((rel.relnode, rel.forknum)) {
2020 0 : Err(WalIngestErrorKind::RelationAlreadyExists(rel))?;
2021 11520 : }
2022 11520 : if !dbdir_exists {
2023 48 : self.pending_directory_entries
2024 48 : .push((DirectoryKind::Rel, MetricsUpdate::Set(0)))
2025 11472 : }
2026 11520 : self.pending_directory_entries
2027 11520 : .push((DirectoryKind::Rel, MetricsUpdate::Add(1)));
2028 11520 : self.put(
2029 11520 : rel_dir_key,
2030 11520 : Value::Image(Bytes::from(RelDirectory::ser(&rel_dir)?)),
2031 : );
2032 : }
2033 :
2034 : // Put size
2035 11520 : let size_key = rel_size_to_key(rel);
2036 11520 : let buf = nblocks.to_le_bytes();
2037 11520 : self.put(size_key, Value::Image(Bytes::from(buf.to_vec())));
2038 11520 :
2039 11520 : self.pending_nblocks += nblocks as i64;
2040 11520 :
2041 11520 : // Update relation size cache
2042 11520 : self.tline.set_cached_rel_size(rel, self.lsn, nblocks);
2043 11520 :
2044 11520 : // Even if nblocks > 0, we don't insert any actual blocks here. That's up to the
2045 11520 : // caller.
2046 11520 : Ok(())
2047 11520 : }
2048 :
2049 : /// Truncate relation
2050 36072 : pub async fn put_rel_truncation(
2051 36072 : &mut self,
2052 36072 : rel: RelTag,
2053 36072 : nblocks: BlockNumber,
2054 36072 : ctx: &RequestContext,
2055 36072 : ) -> Result<(), WalIngestError> {
2056 36072 : ensure_walingest!(rel.relnode != 0, RelationError::InvalidRelnode);
2057 36072 : if self
2058 36072 : .tline
2059 36072 : .get_rel_exists(rel, Version::Modified(self), ctx)
2060 36072 : .await?
2061 : {
2062 36072 : let size_key = rel_size_to_key(rel);
2063 : // Fetch the old size first
2064 36072 : let old_size = self.get(size_key, ctx).await?.get_u32_le();
2065 36072 :
2066 36072 : // Update the entry with the new size.
2067 36072 : let buf = nblocks.to_le_bytes();
2068 36072 : self.put(size_key, Value::Image(Bytes::from(buf.to_vec())));
2069 36072 :
2070 36072 : // Update relation size cache
2071 36072 : self.tline.set_cached_rel_size(rel, self.lsn, nblocks);
2072 36072 :
2073 36072 : // Update logical database size.
2074 36072 : self.pending_nblocks -= old_size as i64 - nblocks as i64;
2075 0 : }
2076 36072 : Ok(())
2077 36072 : }
2078 :
2079 : /// Extend a relation.
2080 : /// If the new size is smaller than the current size, do nothing.
2081 1660080 : pub async fn put_rel_extend(
2082 1660080 : &mut self,
2083 1660080 : rel: RelTag,
2084 1660080 : nblocks: BlockNumber,
2085 1660080 : ctx: &RequestContext,
2086 1660080 : ) -> Result<(), WalIngestError> {
2087 1660080 : ensure_walingest!(rel.relnode != 0, RelationError::InvalidRelnode);
2088 :
2089 : // Put size
2090 1660080 : let size_key = rel_size_to_key(rel);
2091 1660080 : let old_size = self.get(size_key, ctx).await?.get_u32_le();
2092 1660080 :
2093 1660080 : // only extend relation here. never decrease the size
2094 1660080 : if nblocks > old_size {
2095 1648728 : let buf = nblocks.to_le_bytes();
2096 1648728 : self.put(size_key, Value::Image(Bytes::from(buf.to_vec())));
2097 1648728 :
2098 1648728 : // Update relation size cache
2099 1648728 : self.tline.set_cached_rel_size(rel, self.lsn, nblocks);
2100 1648728 :
2101 1648728 : self.pending_nblocks += nblocks as i64 - old_size as i64;
2102 1648728 : }
2103 1660080 : Ok(())
2104 1660080 : }
2105 :
2106 : /// Drop some relations
2107 60 : pub(crate) async fn put_rel_drops(
2108 60 : &mut self,
2109 60 : drop_relations: HashMap<(u32, u32), Vec<RelTag>>,
2110 60 : ctx: &RequestContext,
2111 60 : ) -> Result<(), WalIngestError> {
2112 60 : let v2_enabled = self
2113 60 : .maybe_enable_rel_size_v2()
2114 60 : .map_err(WalIngestErrorKind::MaybeRelSizeV2Error)?;
2115 72 : for ((spc_node, db_node), rel_tags) in drop_relations {
2116 12 : let dir_key = rel_dir_to_key(spc_node, db_node);
2117 12 : let buf = self.get(dir_key, ctx).await?;
2118 12 : let mut dir = RelDirectory::des(&buf)?;
2119 :
2120 12 : let mut dirty = false;
2121 24 : for rel_tag in rel_tags {
2122 12 : let found = if dir.rels.remove(&(rel_tag.relnode, rel_tag.forknum)) {
2123 12 : self.pending_directory_entries
2124 12 : .push((DirectoryKind::Rel, MetricsUpdate::Sub(1)));
2125 12 : dirty = true;
2126 12 : true
2127 0 : } else if v2_enabled {
2128 : // The rel is not found in the old reldir key, so we need to check the new sparse keyspace.
2129 : // Note that a relation can only exist in one of the two keyspaces (guaranteed by the ingestion
2130 : // logic).
2131 0 : let key =
2132 0 : rel_tag_sparse_key(spc_node, db_node, rel_tag.relnode, rel_tag.forknum);
2133 0 : let val = RelDirExists::decode_option(self.sparse_get(key, ctx).await?)
2134 0 : .map_err(|_| WalIngestErrorKind::InvalidKey(key, self.lsn))?;
2135 0 : if val == RelDirExists::Exists {
2136 0 : self.pending_directory_entries
2137 0 : .push((DirectoryKind::RelV2, MetricsUpdate::Sub(1)));
2138 0 : // put tombstone
2139 0 : self.put(key, Value::Image(RelDirExists::Removed.encode()));
2140 0 : // no need to set dirty to true
2141 0 : true
2142 : } else {
2143 0 : false
2144 : }
2145 : } else {
2146 0 : false
2147 : };
2148 :
2149 12 : if found {
2150 : // update logical size
2151 12 : let size_key = rel_size_to_key(rel_tag);
2152 12 : let old_size = self.get(size_key, ctx).await?.get_u32_le();
2153 12 : self.pending_nblocks -= old_size as i64;
2154 12 :
2155 12 : // Remove entry from relation size cache
2156 12 : self.tline.remove_cached_rel_size(&rel_tag);
2157 12 :
2158 12 : // Delete size entry, as well as all blocks; this is currently a no-op because we haven't implemented tombstones in storage.
2159 12 : self.delete(rel_key_range(rel_tag));
2160 0 : }
2161 : }
2162 :
2163 12 : if dirty {
2164 12 : self.put(dir_key, Value::Image(Bytes::from(RelDirectory::ser(&dir)?)));
2165 0 : }
2166 : }
2167 :
2168 60 : Ok(())
2169 60 : }
2170 :
2171 36 : pub async fn put_slru_segment_creation(
2172 36 : &mut self,
2173 36 : kind: SlruKind,
2174 36 : segno: u32,
2175 36 : nblocks: BlockNumber,
2176 36 : ctx: &RequestContext,
2177 36 : ) -> Result<(), WalIngestError> {
2178 36 : assert!(self.tline.tenant_shard_id.is_shard_zero());
2179 :
2180 : // Add it to the directory entry
2181 36 : let dir_key = slru_dir_to_key(kind);
2182 36 : let buf = self.get(dir_key, ctx).await?;
2183 36 : let mut dir = SlruSegmentDirectory::des(&buf)?;
2184 :
2185 36 : if !dir.segments.insert(segno) {
2186 0 : Err(WalIngestErrorKind::SlruAlreadyExists(kind, segno))?;
2187 36 : }
2188 36 : self.pending_directory_entries.push((
2189 36 : DirectoryKind::SlruSegment(kind),
2190 36 : MetricsUpdate::Set(dir.segments.len() as u64),
2191 36 : ));
2192 36 : self.put(
2193 36 : dir_key,
2194 36 : Value::Image(Bytes::from(SlruSegmentDirectory::ser(&dir)?)),
2195 : );
2196 :
2197 : // Put size
2198 36 : let size_key = slru_segment_size_to_key(kind, segno);
2199 36 : let buf = nblocks.to_le_bytes();
2200 36 : self.put(size_key, Value::Image(Bytes::from(buf.to_vec())));
2201 36 :
2202 36 : // even if nblocks > 0, we don't insert any actual blocks here
2203 36 :
2204 36 : Ok(())
2205 36 : }
2206 :
2207 : /// Extend SLRU segment
2208 0 : pub fn put_slru_extend(
2209 0 : &mut self,
2210 0 : kind: SlruKind,
2211 0 : segno: u32,
2212 0 : nblocks: BlockNumber,
2213 0 : ) -> Result<(), WalIngestError> {
2214 0 : assert!(self.tline.tenant_shard_id.is_shard_zero());
2215 :
2216 : // Put size
2217 0 : let size_key = slru_segment_size_to_key(kind, segno);
2218 0 : let buf = nblocks.to_le_bytes();
2219 0 : self.put(size_key, Value::Image(Bytes::from(buf.to_vec())));
2220 0 : Ok(())
2221 0 : }
2222 :
2223 : /// This method is used for marking truncated SLRU files
2224 0 : pub async fn drop_slru_segment(
2225 0 : &mut self,
2226 0 : kind: SlruKind,
2227 0 : segno: u32,
2228 0 : ctx: &RequestContext,
2229 0 : ) -> Result<(), WalIngestError> {
2230 0 : // Remove it from the directory entry
2231 0 : let dir_key = slru_dir_to_key(kind);
2232 0 : let buf = self.get(dir_key, ctx).await?;
2233 0 : let mut dir = SlruSegmentDirectory::des(&buf)?;
2234 :
2235 0 : if !dir.segments.remove(&segno) {
2236 0 : warn!("slru segment {:?}/{} does not exist", kind, segno);
2237 0 : }
2238 0 : self.pending_directory_entries.push((
2239 0 : DirectoryKind::SlruSegment(kind),
2240 0 : MetricsUpdate::Set(dir.segments.len() as u64),
2241 0 : ));
2242 0 : self.put(
2243 0 : dir_key,
2244 0 : Value::Image(Bytes::from(SlruSegmentDirectory::ser(&dir)?)),
2245 : );
2246 :
2247 : // Delete size entry, as well as all blocks
2248 0 : self.delete(slru_segment_key_range(kind, segno));
2249 0 :
2250 0 : Ok(())
2251 0 : }
2252 :
2253 : /// Drop a relmapper file (pg_filenode.map)
2254 0 : pub fn drop_relmap_file(&mut self, _spcnode: Oid, _dbnode: Oid) -> Result<(), WalIngestError> {
2255 0 : // TODO
2256 0 : Ok(())
2257 0 : }
2258 :
2259 : /// Drop a pg_twophase file for the given xid, removing it from the directory entry.
2260 0 : pub async fn drop_twophase_file(
2261 0 : &mut self,
2262 0 : xid: u64,
2263 0 : ctx: &RequestContext,
2264 0 : ) -> Result<(), WalIngestError> {
2265 : // Remove it from the directory entry
2266 0 : let buf = self.get(TWOPHASEDIR_KEY, ctx).await?;
2267 0 : let newdirbuf = if self.tline.pg_version >= 17 {
2268 0 : let mut dir = TwoPhaseDirectoryV17::des(&buf)?;
2269 :
2270 0 : if !dir.xids.remove(&xid) {
2271 0 : warn!("twophase file for xid {} does not exist", xid);
2272 0 : }
2273 0 : self.pending_directory_entries.push((
2274 0 : DirectoryKind::TwoPhase,
2275 0 : MetricsUpdate::Set(dir.xids.len() as u64),
2276 0 : ));
2277 0 : Bytes::from(TwoPhaseDirectoryV17::ser(&dir)?)
2278 : } else {
2279 0 : let xid: u32 = u32::try_from(xid)
2280 0 : .map_err(|e| WalIngestErrorKind::LogicalError(anyhow::Error::from(e)))?;
2281 0 : let mut dir = TwoPhaseDirectory::des(&buf)?;
2282 :
2283 0 : if !dir.xids.remove(&xid) {
2284 0 : warn!("twophase file for xid {} does not exist", xid);
2285 0 : }
2286 0 : self.pending_directory_entries.push((
2287 0 : DirectoryKind::TwoPhase,
2288 0 : MetricsUpdate::Set(dir.xids.len() as u64),
2289 0 : ));
2290 0 : Bytes::from(TwoPhaseDirectory::ser(&dir)?)
2291 : };
2292 0 : self.put(TWOPHASEDIR_KEY, Value::Image(newdirbuf));
2293 0 :
2294 0 : // Delete it
2295 0 : self.delete(twophase_key_range(xid));
2296 0 :
2297 0 : Ok(())
2298 0 : }
2299 :
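     : /// Store an auxiliary file under `path`, merging it into the encoded aux-file
     : /// value for its key. An empty `content` removes the file. See the
     : /// `aux_files_round_trip` test below for a usage sketch.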
2300 96 : pub async fn put_file(
2301 96 : &mut self,
2302 96 : path: &str,
2303 96 : content: &[u8],
2304 96 : ctx: &RequestContext,
2305 96 : ) -> Result<(), WalIngestError> {
2306 96 : let key = aux_file::encode_aux_file_key(path);
2307 : // retrieve the key from the engine
2308 96 : let old_val = match self.get(key, ctx).await {
2309 24 : Ok(val) => Some(val),
2310 72 : Err(PageReconstructError::MissingKey(_)) => None,
2311 0 : Err(e) => return Err(e.into()),
2312 : };
2313 96 : let files: Vec<(&str, &[u8])> = if let Some(ref old_val) = old_val {
2314 24 : aux_file::decode_file_value(old_val).map_err(WalIngestErrorKind::EncodeAuxFileError)?
2315 : } else {
2316 72 : Vec::new()
2317 : };
2318 96 : let mut other_files = Vec::with_capacity(files.len());
2319 96 : let mut modifying_file = None;
2320 120 : for file @ (p, content) in files {
2321 24 : if path == p {
2322 24 : assert!(
2323 24 : modifying_file.is_none(),
2324 0 : "duplicated entries found for {}",
2325 : path
2326 : );
2327 24 : modifying_file = Some(content);
2328 0 : } else {
2329 0 : other_files.push(file);
2330 0 : }
2331 : }
2332 96 : let mut new_files = other_files;
2333 96 : match (modifying_file, content.is_empty()) {
2334 12 : (Some(old_content), false) => {
2335 12 : self.tline
2336 12 : .aux_file_size_estimator
2337 12 : .on_update(old_content.len(), content.len());
2338 12 : new_files.push((path, content));
2339 12 : }
2340 12 : (Some(old_content), true) => {
2341 12 : self.tline
2342 12 : .aux_file_size_estimator
2343 12 : .on_remove(old_content.len());
2344 12 : // not adding the file key to the final `new_files` vec.
2345 12 : }
2346 72 : (None, false) => {
2347 72 : self.tline.aux_file_size_estimator.on_add(content.len());
2348 72 : new_files.push((path, content));
2349 72 : }
2350 : // Compute may request deletion of an old version of a pgstat AUX file if the new one exceeds the size limit.
2351 : // Compute doesn't know whether a previous version of this file exists, so an
2352 : // attempt to delete a non-existent file can hit this path.
2353 : // To avoid false alarms, log it as info rather than warning.
2354 0 : (None, true) if path.starts_with("pg_stat/") => {
2355 0 : info!("removing non-existing pg_stat file: {}", path)
2356 : }
2357 0 : (None, true) => warn!("removing non-existing aux file: {}", path),
2358 : }
2359 96 : let new_val = aux_file::encode_file_value(&new_files)
2360 96 : .map_err(WalIngestErrorKind::EncodeAuxFileError)?;
2361 96 : self.put(key, Value::Image(new_val.into()));
2362 96 :
2363 96 : Ok(())
2364 96 : }
2365 :
2366 : ///
2367 : /// Flush changes accumulated so far to the underlying repository.
2368 : ///
2369 : /// Usually, changes made in DatadirModification are atomic, but this allows
2370 : /// you to flush them to the underlying repository before the final `commit`.
2371 : /// That allows to free up the memory used to hold the pending changes.
2372 : ///
2373 : /// Currently only used during bulk import of a data directory. In that
2374 : /// context, breaking the atomicity is OK. If the import is interrupted, the
2375 : /// whole import fails and the timeline will be deleted anyway.
2376 : /// (Or to be precise, it will be left behind for debugging purposes and
2377 : /// ignored, see <https://github.com/neondatabase/neon/pull/1809>)
2378 : ///
2379 : /// Note: A consequence of flushing the pending operations is that they
2380 : /// won't be visible to subsequent operations until `commit`. The function
2381 : /// retains all the metadata, but data pages are flushed. That's again OK
2382 : /// for bulk import, where you are just loading data pages and won't try to
2383 : /// modify the same pages twice.
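     : ///
     : /// Import-loop sketch (hypothetical; `data_chunks` and `ingest` are stand-ins
     : /// for the bulk-import driver):
     : ///
     : /// ```ignore
     : /// for chunk in data_chunks {
     : ///     ingest(&mut modification, chunk)?;
     : ///     modification.flush(ctx).await?; // lets flushed data pages be freed early
     : /// }
     : /// modification.commit(ctx).await?;
     : /// ```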
2384 11580 : pub(crate) async fn flush(&mut self, ctx: &RequestContext) -> anyhow::Result<()> {
2385 11580 : // Unless we have accumulated a decent amount of changes, it's not worth it
2386 11580 : // to scan through the pending_updates list.
2387 11580 : let pending_nblocks = self.pending_nblocks;
2388 11580 : if pending_nblocks < 10000 {
2389 11580 : return Ok(());
2390 0 : }
2391 :
2392 0 : let mut writer = self.tline.writer().await;
2393 :
2394 : // Flush relation and SLRU data blocks, keep metadata.
2395 0 : if let Some(batch) = self.pending_data_batch.take() {
2396 0 : tracing::debug!(
2397 0 : "Flushing batch with max_lsn={}. Last record LSN is {}",
2398 0 : batch.max_lsn,
2399 0 : self.tline.get_last_record_lsn()
2400 : );
2401 :
2402 : // This bails out on first error without modifying pending_updates.
2403 : // That's Ok, cf this function's doc comment.
2404 0 : writer.put_batch(batch, ctx).await?;
2405 0 : }
2406 :
2407 0 : if pending_nblocks != 0 {
2408 0 : writer.update_current_logical_size(pending_nblocks * i64::from(BLCKSZ));
2409 0 : self.pending_nblocks = 0;
2410 0 : }
2411 :
2412 0 : for (kind, count) in std::mem::take(&mut self.pending_directory_entries) {
2413 0 : writer.update_directory_entries_count(kind, count);
2414 0 : }
2415 :
2416 0 : Ok(())
2417 11580 : }
2418 :
2419 : ///
2420 : /// Finish this atomic update, writing all the updated keys to the
2421 : /// underlying timeline.
2422 : /// All the modifications in this atomic update are stamped by the specified LSN.
2423 : ///
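     : /// Round-trip sketch, mirroring the `aux_files_round_trip` test below:
     : ///
     : /// ```ignore
     : /// let mut modification = tline.begin_modification(Lsn(0x1000));
     : /// modification.put_file("foo/bar1", b"content1", &ctx).await?;
     : /// modification.commit(&ctx).await?;
     : /// ```
     : ///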
2424 4458624 : pub async fn commit(&mut self, ctx: &RequestContext) -> anyhow::Result<()> {
2425 4458624 : let mut writer = self.tline.writer().await;
2426 :
2427 4458624 : let pending_nblocks = self.pending_nblocks;
2428 4458624 : self.pending_nblocks = 0;
2429 :
2430 : // Ordering: the items in this batch do not need to be in any global order, but values for
2431 : // a particular Key must be in Lsn order relative to one another. InMemoryLayer relies on
2432 : // this to do efficient updates to its index. See [`wal_decoder::serialized_batch`] for
2433 : // more details.
2434 :
2435 4458624 : let metadata_batch = {
2436 4458624 : let pending_meta = self
2437 4458624 : .pending_metadata_pages
2438 4458624 : .drain()
2439 4458624 : .flat_map(|(key, values)| {
2440 1644528 : values
2441 1644528 : .into_iter()
2442 1644528 : .map(move |(lsn, value_size, value)| (key, lsn, value_size, value))
2443 4458624 : })
2444 4458624 : .collect::<Vec<_>>();
2445 4458624 :
2446 4458624 : if pending_meta.is_empty() {
2447 2833668 : None
2448 : } else {
2449 1624956 : Some(SerializedValueBatch::from_values(pending_meta))
2450 : }
2451 : };
2452 :
2453 4458624 : let data_batch = self.pending_data_batch.take();
2454 :
2455 4458624 : let maybe_batch = match (data_batch, metadata_batch) {
2456 1587336 : (Some(mut data), Some(metadata)) => {
2457 1587336 : data.extend(metadata);
2458 1587336 : Some(data)
2459 : }
2460 859572 : (Some(data), None) => Some(data),
2461 37620 : (None, Some(metadata)) => Some(metadata),
2462 1974096 : (None, None) => None,
2463 : };
2464 :
2465 4458624 : if let Some(batch) = maybe_batch {
2466 2484528 : tracing::debug!(
2467 0 : "Flushing batch with max_lsn={}. Last record LSN is {}",
2468 0 : batch.max_lsn,
2469 0 : self.tline.get_last_record_lsn()
2470 : );
2471 :
2472 : // This bails out on first error without modifying pending_updates.
2473 : // That's Ok, cf this function's doc comment.
2474 2484528 : writer.put_batch(batch, ctx).await?;
2475 1974096 : }
2476 :
2477 4458624 : if !self.pending_deletions.is_empty() {
2478 12 : writer.delete_batch(&self.pending_deletions, ctx).await?;
2479 12 : self.pending_deletions.clear();
2480 4458612 : }
2481 :
2482 4458624 : self.pending_lsns.push(self.lsn);
2483 5333772 : for pending_lsn in self.pending_lsns.drain(..) {
2484 5333772 : // TODO(vlad): pretty sure the comment below is not valid anymore
2485 5333772 : // and we can call finish write with the latest LSN
2486 5333772 : //
2487 5333772 : // Ideally, we should be able to call writer.finish_write() only once
2488 5333772 : // with the highest LSN. However, the last_record_lsn variable in the
2489 5333772 : // timeline keeps track of the latest LSN and the immediate previous LSN
2490 5333772 : // so we need to record every LSN to not leave a gap between them.
2491 5333772 : writer.finish_write(pending_lsn);
2492 5333772 : }
2493 :
2494 4458624 : if pending_nblocks != 0 {
2495 1623420 : writer.update_current_logical_size(pending_nblocks * i64::from(BLCKSZ));
2496 2835204 : }
2497 :
2498 4458624 : for (kind, count) in std::mem::take(&mut self.pending_directory_entries) {
2499 18144 : writer.update_directory_entries_count(kind, count);
2500 18144 : }
2501 :
2502 4458624 : self.pending_metadata_bytes = 0;
2503 4458624 :
2504 4458624 : Ok(())
2505 4458624 : }
2506 :
2507 1750224 : pub(crate) fn len(&self) -> usize {
2508 1750224 : self.pending_metadata_pages.len()
2509 1750224 : + self.pending_data_batch.as_ref().map_or(0, |b| b.len())
2510 1750224 : + self.pending_deletions.len()
2511 1750224 : }
2512 :
2513 : /// Read a page from the Timeline we are writing to. For metadata pages, this passes through
2514 : /// a cache in Self, which makes writes earlier in this modification visible to WAL records later
2515 : /// in the modification.
2516 : ///
2517 : /// For data pages, reads pass directly to the owning Timeline: any ingest code which reads a data
2518 : /// page must ensure that the pages they read are already committed in Timeline, for example
2519 : /// DB create operations are always preceded by a call to commit(). This is special cased because
2520 : /// it's rare: all the 'normal' WAL operations will only read metadata pages such as relation sizes,
2521 : /// and not data pages.
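     : ///
     : /// Read-your-writes sketch for a metadata key (`img` assumed in scope):
     : ///
     : /// ```ignore
     : /// modification.put_checkpoint(img.clone())?;
     : /// let read = modification.get(CHECKPOINT_KEY, ctx).await?;
     : /// assert_eq!(read, img); // visible before commit()
     : /// ```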
2522 1719516 : async fn get(&self, key: Key, ctx: &RequestContext) -> Result<Bytes, PageReconstructError> {
2523 1719516 : if !Self::is_data_key(&key) {
2524 : // Have we already updated the same key? Read the latest pending updated
2525 : // version in that case.
2526 : //
2527 : // Note: we don't check pending_deletions. It is an error to request a
2528 : // value that has been removed, deletion only avoids leaking storage.
2529 1719516 : if let Some(values) = self.pending_metadata_pages.get(&key.to_compact()) {
2530 95568 : if let Some((_, _, value)) = values.last() {
2531 95568 : return if let Value::Image(img) = value {
2532 95568 : Ok(img.clone())
2533 : } else {
2534 : // Currently, we never need to read back a WAL record that we
2535 : // inserted in the same "transaction". All the metadata updates
2536 : // work directly with Images, and we never need to read actual
2537 : // data pages. We could handle this if we had to, by calling
2538 : // the walredo manager, but let's keep it simple for now.
2539 0 : Err(PageReconstructError::Other(anyhow::anyhow!(
2540 0 : "unexpected pending WAL record"
2541 0 : )))
2542 : };
2543 0 : }
2544 1623948 : }
2545 : } else {
2546 : // This is an expensive check, so we only do it in debug mode. If reading a data key,
2547 : // this key should never be present in the pending data batch. We ensure this by committing
2548 : // modifications before ingesting DB create operations, which are the only kind that reads
2549 : // data pages during ingest.
2550 0 : if cfg!(debug_assertions) {
2551 0 : assert!(
2552 0 : !self
2553 0 : .pending_data_batch
2554 0 : .as_ref()
2555 0 : .is_some_and(|b| b.updates_key(&key))
2556 0 : );
2557 0 : }
2558 : }
2559 :
2560 : // Metadata page cache miss, or we're reading a data page.
2561 1623948 : let lsn = Lsn::max(self.tline.get_last_record_lsn(), self.lsn);
2562 1623948 : self.tline.get(key, lsn, ctx).await
2563 1719516 : }
2564 :
2565 : /// Get a key from the sparse keyspace. Automatically converts the missing key error
2566 : /// and the empty value into None.
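     : ///
     : /// Sketch of a sparse rel-dir probe (see [`Self::put_rel_creation`] for the real
     : /// usage; `spc`, `db`, `relnode` and `fork` are placeholders):
     : ///
     : /// ```ignore
     : /// let val = self.sparse_get(rel_tag_sparse_key(spc, db, relnode, fork), ctx).await?;
     : /// let exists = RelDirExists::decode_option(val)? == RelDirExists::Exists;
     : /// ```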
2567 0 : async fn sparse_get(
2568 0 : &self,
2569 0 : key: Key,
2570 0 : ctx: &RequestContext,
2571 0 : ) -> Result<Option<Bytes>, PageReconstructError> {
2572 0 : let val = self.get(key, ctx).await;
2573 0 : match val {
2574 0 : Ok(val) if val.is_empty() => Ok(None),
2575 0 : Ok(val) => Ok(Some(val)),
2576 0 : Err(PageReconstructError::MissingKey(_)) => Ok(None),
2577 0 : Err(e) => Err(e),
2578 : }
2579 0 : }
2580 :
2581 3384648 : fn put(&mut self, key: Key, val: Value) {
2582 3384648 : if Self::is_data_key(&key) {
2583 1667208 : self.put_data(key.to_compact(), val)
2584 : } else {
2585 1717440 : self.put_metadata(key.to_compact(), val)
2586 : }
2587 3384648 : }
2588 :
2589 1667208 : fn put_data(&mut self, key: CompactKey, val: Value) {
2590 1667208 : let batch = self
2591 1667208 : .pending_data_batch
2592 1667208 : .get_or_insert_with(SerializedValueBatch::default);
2593 1667208 : batch.put(key, val, self.lsn);
2594 1667208 : }
2595 :
2596 1717440 : fn put_metadata(&mut self, key: CompactKey, val: Value) {
2597 1717440 : let values = self.pending_metadata_pages.entry(key).or_default();
2598 : // Replace the previous value if it exists at the same lsn
2599 1717440 : if let Some((last_lsn, last_value_ser_size, last_value)) = values.last_mut() {
2600 72912 : if *last_lsn == self.lsn {
2601 : // Update the pending_metadata_bytes contribution from this entry, and update the serialized size in place
2602 72912 : self.pending_metadata_bytes -= *last_value_ser_size;
2603 72912 : *last_value_ser_size = val.serialized_size().unwrap() as usize;
2604 72912 : self.pending_metadata_bytes += *last_value_ser_size;
2605 72912 :
2606 72912 : // Use the latest value; this replaces any earlier write to the same (key, lsn), such as may
2607 72912 : // have been generated by synthesized zero-page writes prior to the first real write to a page.
2608 72912 : *last_value = val;
2609 72912 : return;
2610 0 : }
2611 1644528 : }
2612 :
2613 1644528 : let val_serialized_size = val.serialized_size().unwrap() as usize;
2614 1644528 : self.pending_metadata_bytes += val_serialized_size;
2615 1644528 : values.push((self.lsn, val_serialized_size, val));
2616 1644528 :
2617 1644528 : if key == CHECKPOINT_KEY.to_compact() {
2618 1392 : tracing::debug!("Checkpoint key added to pending with size {val_serialized_size}");
2619 1643136 : }
2620 1717440 : }
2621 :
2622 12 : fn delete(&mut self, key_range: Range<Key>) {
2623 12 : trace!("DELETE {}-{}", key_range.start, key_range.end);
2624 12 : self.pending_deletions.push((key_range, self.lsn));
2625 12 : }
2626 : }
2627 :
2628 : /// Statistics for a DatadirModification.
2629 : #[derive(Default)]
2630 : pub struct DatadirModificationStats {
2631 : pub metadata_images: u64,
2632 : pub metadata_deltas: u64,
2633 : pub data_images: u64,
2634 : pub data_deltas: u64,
2635 : }
2636 :
2637 : /// This struct facilitates accessing either a committed key from the timeline at a
2638 : /// specific LSN, or the latest uncommitted key from a pending modification.
2639 : ///
2640 : /// During WAL ingestion, the records from multiple LSNs may be batched in the same
2641 : /// modification before being flushed to the timeline. Hence, the routines in WalIngest
2642 : /// need to look up the keys in the modification first before looking them up in the
2643 : /// timeline to not miss the latest updates.
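     : ///
     : /// Illustrative sketch:
     : ///
     : /// ```ignore
     : /// // Read a committed key at a fixed LSN:
     : /// let committed = Version::Lsn(lsn).get(timeline, key, ctx).await?;
     : /// // Or read through a pending modification's buffered metadata:
     : /// let pending = Version::Modified(&modification).get(timeline, key, ctx).await?;
     : /// ```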
2644 : #[derive(Clone, Copy)]
2645 : pub enum Version<'a> {
2646 : Lsn(Lsn),
2647 : Modified(&'a DatadirModification<'a>),
2648 : }
2649 :
2650 : impl Version<'_> {
2651 31056 : async fn get(
2652 31056 : &self,
2653 31056 : timeline: &Timeline,
2654 31056 : key: Key,
2655 31056 : ctx: &RequestContext,
2656 31056 : ) -> Result<Bytes, PageReconstructError> {
2657 31056 : match self {
2658 30936 : Version::Lsn(lsn) => timeline.get(key, *lsn, ctx).await,
2659 120 : Version::Modified(modification) => modification.get(key, ctx).await,
2660 : }
2661 31056 : }
2662 :
2663 : /// Get a key from the sparse keyspace. Automatically converts the missing key error
2664 : /// and the empty value into None.
2665 0 : async fn sparse_get(
2666 0 : &self,
2667 0 : timeline: &Timeline,
2668 0 : key: Key,
2669 0 : ctx: &RequestContext,
2670 0 : ) -> Result<Option<Bytes>, PageReconstructError> {
2671 0 : let val = self.get(timeline, key, ctx).await;
2672 0 : match val {
2673 0 : Ok(val) if val.is_empty() => Ok(None),
2674 0 : Ok(val) => Ok(Some(val)),
2675 0 : Err(PageReconstructError::MissingKey(_)) => Ok(None),
2676 0 : Err(e) => Err(e),
2677 : }
2678 0 : }
2679 :
2680 213720 : fn get_lsn(&self) -> Lsn {
2681 213720 : match self {
2682 177444 : Version::Lsn(lsn) => *lsn,
2683 36276 : Version::Modified(modification) => modification.lsn,
2684 : }
2685 213720 : }
2686 : }
2687 :
2688 : //--- Metadata structs stored in key-value pairs in the repository.
2689 :
2690 0 : #[derive(Debug, Serialize, Deserialize)]
2691 : pub(crate) struct DbDirectory {
2692 : // (spcnode, dbnode) -> (do relmapper and PG_VERSION files exist)
2693 : pub(crate) dbdirs: HashMap<(Oid, Oid), bool>,
2694 : }
2695 :
2696 : // The format of TwoPhaseDirectory changed in PostgreSQL v17, because the filenames of
2697 : // pg_twophase files were expanded from 32-bit XIDs to 64-bit XIDs. Previously, the files
2698 : // were named like "pg_twophase/000002E5"; now they're like
2699 : // "pg_twophase/0000000A000002E4".
2700 :
2701 0 : #[derive(Debug, Serialize, Deserialize)]
2702 : pub(crate) struct TwoPhaseDirectory {
2703 : pub(crate) xids: HashSet<TransactionId>,
2704 : }
2705 :
2706 0 : #[derive(Debug, Serialize, Deserialize)]
2707 : struct TwoPhaseDirectoryV17 {
2708 : xids: HashSet<u64>,
2709 : }
2710 :
2711 0 : #[derive(Debug, Serialize, Deserialize, Default)]
2712 : pub(crate) struct RelDirectory {
2713 : // Set of relations that exist. (relfilenode, forknum)
2714 : //
2715 : // TODO: Store it as a btree or radix tree or something else that spans multiple
2716 : // key-value pairs, if you have a lot of relations
2717 : pub(crate) rels: HashSet<(Oid, u8)>,
2718 : }
2719 :
2720 0 : #[derive(Debug, Serialize, Deserialize)]
2721 : struct RelSizeEntry {
2722 : nblocks: u32,
2723 : }
2724 :
2725 0 : #[derive(Debug, Serialize, Deserialize, Default)]
2726 : pub(crate) struct SlruSegmentDirectory {
2727 : // Set of SLRU segments that exist.
2728 : pub(crate) segments: HashSet<u32>,
2729 : }
2730 :
2731 : #[derive(Copy, Clone, PartialEq, Eq, Debug, enum_map::Enum)]
2732 : #[repr(u8)]
2733 : pub(crate) enum DirectoryKind {
2734 : Db,
2735 : TwoPhase,
2736 : Rel,
2737 : AuxFiles,
2738 : SlruSegment(SlruKind),
2739 : RelV2,
2740 : }
2741 :
2742 : impl DirectoryKind {
2743 : pub(crate) const KINDS_NUM: usize = <DirectoryKind as Enum>::LENGTH;
2744 54444 : pub(crate) fn offset(&self) -> usize {
2745 54444 : self.into_usize()
2746 54444 : }
2747 : }
2748 :
2749 : static ZERO_PAGE: Bytes = Bytes::from_static(&[0u8; BLCKSZ as usize]);
2750 :
2751 : #[allow(clippy::bool_assert_comparison)]
2752 : #[cfg(test)]
2753 : mod tests {
2754 : use hex_literal::hex;
2755 : use pageserver_api::models::ShardParameters;
2756 : use pageserver_api::shard::ShardStripeSize;
2757 : use utils::id::TimelineId;
2758 : use utils::shard::{ShardCount, ShardNumber};
2759 :
2760 : use super::*;
2761 : use crate::DEFAULT_PG_VERSION;
2762 : use crate::tenant::harness::TenantHarness;
2763 :
2764 : /// Test a round trip of aux file updates, from DatadirModification to reading back from the Timeline
2765 : #[tokio::test]
2766 12 : async fn aux_files_round_trip() -> anyhow::Result<()> {
2767 12 : let name = "aux_files_round_trip";
2768 12 : let harness = TenantHarness::create(name).await?;
2769 12 :
2770 12 : pub const TIMELINE_ID: TimelineId =
2771 12 : TimelineId::from_array(hex!("11223344556677881122334455667788"));
2772 12 :
2773 12 : let (tenant, ctx) = harness.load().await;
2774 12 : let (tline, ctx) = tenant
2775 12 : .create_empty_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
2776 12 : .await?;
2777 12 : let tline = tline.raw_timeline().unwrap();
2778 12 :
2779 12 : // First modification: insert two keys
2780 12 : let mut modification = tline.begin_modification(Lsn(0x1000));
2781 12 : modification.put_file("foo/bar1", b"content1", &ctx).await?;
2782 12 : modification.set_lsn(Lsn(0x1008))?;
2783 12 : modification.put_file("foo/bar2", b"content2", &ctx).await?;
2784 12 : modification.commit(&ctx).await?;
2785 12 : let expect_1008 = HashMap::from([
2786 12 : ("foo/bar1".to_string(), Bytes::from_static(b"content1")),
2787 12 : ("foo/bar2".to_string(), Bytes::from_static(b"content2")),
2788 12 : ]);
2789 12 :
2790 12 : let io_concurrency = IoConcurrency::spawn_for_test();
2791 12 :
2792 12 : let readback = tline
2793 12 : .list_aux_files(Lsn(0x1008), &ctx, io_concurrency.clone())
2794 12 : .await?;
2795 12 : assert_eq!(readback, expect_1008);
2796 12 :
2797 12 : // Second modification: update one key, remove the other
2798 12 : let mut modification = tline.begin_modification(Lsn(0x2000));
2799 12 : modification.put_file("foo/bar1", b"content3", &ctx).await?;
2800 12 : modification.set_lsn(Lsn(0x2008))?;
2801 12 : modification.put_file("foo/bar2", b"", &ctx).await?;
2802 12 : modification.commit(&ctx).await?;
2803 12 : let expect_2008 =
2804 12 : HashMap::from([("foo/bar1".to_string(), Bytes::from_static(b"content3"))]);
2805 12 :
2806 12 : let readback = tline
2807 12 : .list_aux_files(Lsn(0x2008), &ctx, io_concurrency.clone())
2808 12 : .await?;
2809 12 : assert_eq!(readback, expect_2008);
2810 12 :
2811 12 : // Reading back in time works
2812 12 : let readback = tline
2813 12 : .list_aux_files(Lsn(0x1008), &ctx, io_concurrency.clone())
2814 12 : .await?;
2815 12 : assert_eq!(readback, expect_1008);
2816 12 :
2817 12 : Ok(())
2818 12 : }
2819 :
2820 : #[test]
2821 12 : fn gap_finding() {
2822 12 : let rel = RelTag {
2823 12 : spcnode: 1663,
2824 12 : dbnode: 208101,
2825 12 : relnode: 2620,
2826 12 : forknum: 0,
2827 12 : };
2828 12 : let base_blkno = 1;
2829 12 :
2830 12 : let base_key = rel_block_to_key(rel, base_blkno);
2831 12 : let before_base_key = rel_block_to_key(rel, base_blkno - 1);
2832 12 :
2833 12 : let shard = ShardIdentity::unsharded();
2834 12 :
2835 12 : let mut previous_nblocks = 0;
2836 132 : for i in 0..10 {
2837 120 : let crnt_blkno = base_blkno + i;
2838 120 : let gaps = DatadirModification::find_gaps(rel, crnt_blkno, previous_nblocks, &shard);
2839 120 :
2840 120 : previous_nblocks = crnt_blkno + 1;
2841 120 :
2842 120 : if i == 0 {
2843 : // The first block we write is 1, so we should find the gap.
2844 12 : assert_eq!(gaps.unwrap(), KeySpace::single(before_base_key..base_key));
2845 : } else {
2846 108 : assert!(gaps.is_none());
2847 : }
2848 : }
2849 :
2850 : // This is an update to an already existing block. No gaps here.
2851 12 : let update_blkno = 5;
2852 12 : let gaps = DatadirModification::find_gaps(rel, update_blkno, previous_nblocks, &shard);
2853 12 : assert!(gaps.is_none());
2854 :
2855 : // This is an update past the current end block.
2856 12 : let after_gap_blkno = 20;
2857 12 : let gaps = DatadirModification::find_gaps(rel, after_gap_blkno, previous_nblocks, &shard);
2858 12 :
2859 12 : let gap_start_key = rel_block_to_key(rel, previous_nblocks);
2860 12 : let after_gap_key = rel_block_to_key(rel, after_gap_blkno);
2861 12 : assert_eq!(
2862 12 : gaps.unwrap(),
2863 12 : KeySpace::single(gap_start_key..after_gap_key)
2864 12 : );
2865 12 : }
2866 :
2867 : #[test]
2868 12 : fn sharded_gap_finding() {
2869 12 : let rel = RelTag {
2870 12 : spcnode: 1663,
2871 12 : dbnode: 208101,
2872 12 : relnode: 2620,
2873 12 : forknum: 0,
2874 12 : };
2875 12 :
2876 12 : let first_blkno = 6;
2877 12 :
2878 12 : // This shard will get the even blocks
2879 12 : let shard = ShardIdentity::from_params(
2880 12 : ShardNumber(0),
2881 12 : &ShardParameters {
2882 12 : count: ShardCount(2),
2883 12 : stripe_size: ShardStripeSize(1),
2884 12 : },
2885 12 : );
2886 12 :
2887 12 : // Only keys belonging to this shard are considered as gaps.
2888 12 : let mut previous_nblocks = 0;
2889 12 : let gaps =
2890 12 : DatadirModification::find_gaps(rel, first_blkno, previous_nblocks, &shard).unwrap();
2891 12 : assert!(!gaps.ranges.is_empty());
2892 36 : for gap_range in gaps.ranges {
2893 24 : let mut k = gap_range.start;
2894 48 : while k != gap_range.end {
2895 24 : assert_eq!(shard.get_shard_number(&k), shard.number);
2896 24 : k = k.next();
2897 : }
2898 : }
2899 :
2900 12 : previous_nblocks = first_blkno;
2901 12 :
2902 12 : let update_blkno = 2;
2903 12 : let gaps = DatadirModification::find_gaps(rel, update_blkno, previous_nblocks, &shard);
2904 12 : assert!(gaps.is_none());
2905 12 : }
2906 :
2907 : /*
2908 : fn assert_current_logical_size<R: Repository>(timeline: &DatadirTimeline<R>, lsn: Lsn) {
2909 : let incremental = timeline.get_current_logical_size();
2910 : let non_incremental = timeline
2911 : .get_current_logical_size_non_incremental(lsn)
2912 : .unwrap();
2913 : assert_eq!(incremental, non_incremental);
2914 : }
2915 : */
2916 :
2917 : /*
2918 : ///
2919 : /// Test list_rels() function, with branches and dropped relations
2920 : ///
2921 : #[test]
2922 : fn test_list_rels_drop() -> Result<()> {
2923 : let repo = RepoHarness::create("test_list_rels_drop")?.load();
2924 : let tline = create_empty_timeline(repo, TIMELINE_ID)?;
2925 : const TESTDB: u32 = 111;
2926 :
2927 : // Import initial dummy checkpoint record, otherwise the get_timeline() call
2928 : // after branching fails below
2929 : let mut writer = tline.begin_record(Lsn(0x10));
2930 : writer.put_checkpoint(ZERO_CHECKPOINT.clone())?;
2931 : writer.finish()?;
2932 :
2933 : // Create a relation on the timeline
2934 : let mut writer = tline.begin_record(Lsn(0x20));
2935 : writer.put_rel_page_image(TESTREL_A, 0, TEST_IMG("foo blk 0 at 2"))?;
2936 : writer.finish()?;
2937 :
2938 : let writer = tline.begin_record(Lsn(0x00));
2939 : writer.finish()?;
2940 :
2941 : // Check that list_rels() lists it after LSN 2, but no before it
2942 : assert!(!tline.list_rels(0, TESTDB, Lsn(0x10))?.contains(&TESTREL_A));
2943 : assert!(tline.list_rels(0, TESTDB, Lsn(0x20))?.contains(&TESTREL_A));
2944 : assert!(tline.list_rels(0, TESTDB, Lsn(0x30))?.contains(&TESTREL_A));
2945 :
2946 : // Create a branch, check that the relation is visible there
2947 : repo.branch_timeline(&tline, NEW_TIMELINE_ID, Lsn(0x30))?;
2948 : let newtline = match repo.get_timeline(NEW_TIMELINE_ID)?.local_timeline() {
2949 : Some(timeline) => timeline,
2950 : None => panic!("Should have a local timeline"),
2951 : };
2952 : let newtline = DatadirTimelineImpl::new(newtline);
2953 : assert!(newtline
2954 : .list_rels(0, TESTDB, Lsn(0x30))?
2955 : .contains(&TESTREL_A));
2956 :
2957 : // Drop it on the branch
2958 : let mut new_writer = newtline.begin_record(Lsn(0x40));
2959 : new_writer.drop_relation(TESTREL_A)?;
2960 : new_writer.finish()?;
2961 :
2962 : // Check that it's no longer listed on the branch after the point where it was dropped
2963 : assert!(newtline
2964 : .list_rels(0, TESTDB, Lsn(0x30))?
2965 : .contains(&TESTREL_A));
2966 : assert!(!newtline
2967 : .list_rels(0, TESTDB, Lsn(0x40))?
2968 : .contains(&TESTREL_A));
2969 :
2970 : // Run checkpoint and garbage collection and check that it's still not visible
2971 : newtline.checkpoint(CheckpointConfig::Forced)?;
2972 : repo.gc_iteration(Some(NEW_TIMELINE_ID), 0, true)?;
2973 :
2974 : assert!(!newtline
2975 : .list_rels(0, TESTDB, Lsn(0x40))?
2976 : .contains(&TESTREL_A));
2977 :
2978 : Ok(())
2979 : }
2980 : */
2981 :
2982 : /*
2983 : #[test]
2984 : fn test_read_beyond_eof() -> Result<()> {
2985 : let repo = RepoHarness::create("test_read_beyond_eof")?.load();
2986 : let tline = create_test_timeline(repo, TIMELINE_ID)?;
2987 :
2988 : make_some_layers(&tline, Lsn(0x20))?;
2989 : let mut writer = tline.begin_record(Lsn(0x60));
2990 : walingest.put_rel_page_image(
2991 : &mut writer,
2992 : TESTREL_A,
2993 : 0,
2994 : TEST_IMG(&format!("foo blk 0 at {}", Lsn(0x60))),
2995 : )?;
2996 : writer.finish()?;
2997 :
2998 : // Test read before rel creation. Should error out.
2999 : assert!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x10), false).is_err());
3000 :
3001 : // Read block beyond end of relation at different points in time.
3002 : // These reads should fall into different delta, image, and in-memory layers.
3003 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x20), false)?, ZERO_PAGE);
3004 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x25), false)?, ZERO_PAGE);
3005 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x30), false)?, ZERO_PAGE);
3006 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x35), false)?, ZERO_PAGE);
3007 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x40), false)?, ZERO_PAGE);
3008 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x45), false)?, ZERO_PAGE);
3009 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x50), false)?, ZERO_PAGE);
3010 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x55), false)?, ZERO_PAGE);
3011 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x60), false)?, ZERO_PAGE);
3012 :
3013 : // Test on an in-memory layer with no preceding layer
3014 : let mut writer = tline.begin_record(Lsn(0x70));
3015 : walingest.put_rel_page_image(
3016 : &mut writer,
3017 : TESTREL_B,
3018 : 0,
3019 : TEST_IMG(&format!("foo blk 0 at {}", Lsn(0x70))),
3020 : )?;
3021 : writer.finish()?;
3022 :
3023 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_B, 1, Lsn(0x70), false)?, ZERO_PAGE);
3024 :
3025 : Ok(())
3026 : }
3027 : */
3028 : }
|