LCOV - code coverage report
Current view: top level - pageserver/src - walingest.rs (source / functions)
Test: 1e20c4f2b28aa592527961bb32170ebbd2c9172f.info
Test Date: 2025-07-16 12:29:03
                 Coverage    Total    Hit
    Lines:         55.8 %     1598    892
    Functions:     56.5 %       92     52

            Line data    Source code
       1              : //!
       2              : //! Parse PostgreSQL WAL records and store them in a neon Timeline.
       3              : //!
       4              : //! The pipeline for ingesting WAL looks like this:
       5              : //!
       6              : //! WAL receiver  -> [`wal_decoder`] ->  WalIngest  ->   Repository
       7              : //!
       8              : //! The WAL receiver receives a stream of WAL from the WAL safekeepers.
       9              : //! Records get decoded and interpreted in the [`wal_decoder`] module
      10              : //! and then stored in the Repository by WalIngest.
      11              : //!
      12              : //! The neon Repository can store page versions in two formats: as
      13              : //! page images, or as WAL records. [`wal_decoder::models::InterpretedWalRecord::from_bytes_filtered`]
      14              : //! extracts page images out of some WAL records, but mostly it's WAL
      15              : //! records. If a WAL record modifies multiple pages, WalIngest
      16              : //! will call Repository::put_rel_wal_record or put_rel_page_image functions
      17              : //! separately for each modified page.
      18              : //!
      19              : //! To reconstruct a page using a WAL record, the Repository calls the
      20              : //! code in walredo.rs. walredo.rs passes most WAL records to the WAL
      21              : //! redo Postgres process, but some records it can handle directly with
      22              : //! bespoke Rust code.
      23              : 
      24              : use std::backtrace::Backtrace;
      25              : use std::collections::HashMap;
      26              : use std::sync::{Arc, OnceLock};
      27              : use std::time::{Duration, Instant, SystemTime};
      28              : 
      29              : use bytes::{Buf, Bytes};
      30              : use pageserver_api::key::{Key, rel_block_to_key};
      31              : use pageserver_api::reltag::{BlockNumber, RelTag, SlruKind};
      32              : use pageserver_api::shard::ShardIdentity;
      33              : use postgres_ffi::walrecord::*;
      34              : use postgres_ffi::{
      35              :     PgMajorVersion, TransactionId, dispatch_pgversion, enum_pgversion, enum_pgversion_dispatch,
      36              :     fsm_logical_to_physical, pg_constants,
      37              : };
      38              : use postgres_ffi_types::TimestampTz;
      39              : use postgres_ffi_types::forknum::{FSM_FORKNUM, INIT_FORKNUM, MAIN_FORKNUM, VISIBILITYMAP_FORKNUM};
      40              : use tracing::*;
      41              : use utils::bin_ser::{DeserializeError, SerializeError};
      42              : use utils::lsn::Lsn;
      43              : use utils::rate_limit::RateLimit;
      44              : use utils::{critical_timeline, failpoint_support};
      45              : use wal_decoder::models::record::NeonWalRecord;
      46              : use wal_decoder::models::*;
      47              : 
      48              : use crate::ZERO_PAGE;
      49              : use crate::context::RequestContext;
      50              : use crate::metrics::WAL_INGEST;
      51              : use crate::pgdatadir_mapping::{DatadirModification, Version};
      52              : use crate::span::debug_assert_current_span_has_tenant_and_timeline_id;
      53              : use crate::tenant::{PageReconstructError, Timeline};
      54              : 
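                       : // `CheckPoint` wraps the version-specific `pgv::CheckPoint` structs in a single
                       : // enum, with one variant per supported PostgreSQL major version, so the rest of
                       : // this module can dispatch on the timeline's PostgreSQL version at runtime.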
      55              : enum_pgversion! {CheckPoint, pgv::CheckPoint}
      56              : 
      57              : impl CheckPoint {
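                       :     /// Serialize the checkpoint back into the on-disk format of its
                       :     /// PostgreSQL version.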
      58            3 :     fn encode(&self) -> Result<Bytes, SerializeError> {
      59            3 :         enum_pgversion_dispatch!(self, CheckPoint, cp, { cp.encode() })
      60            3 :     }
      61              : 
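                       :     /// Advance `nextXid` past `xid` if needed. Returns `true` if the
                       :     /// checkpoint was modified and needs to be stored back.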
      62        72917 :     fn update_next_xid(&mut self, xid: u32) -> bool {
      63        72917 :         enum_pgversion_dispatch!(self, CheckPoint, cp, { cp.update_next_xid(xid) })
      64        72917 :     }
      65              : 
      66            0 :     pub fn update_next_multixid(&mut self, multi_xid: u32, multi_offset: u32) -> bool {
      67            0 :         enum_pgversion_dispatch!(self, CheckPoint, cp, {
      68            0 :             cp.update_next_multixid(multi_xid, multi_offset)
      69              :         })
      70            0 :     }
      71              : }
      72              : 
      73              : /// Temporary suppression of WAL lag warnings after attach
      74              : ///
      75              : /// After tenant attach, we want to suppress WAL lag warnings because
      76              : /// we don't look at the WAL until the attach is complete, which
      77              : /// might take a while.
      78              : pub struct WalLagCooldown {
      79              :     /// Until when this suppression applies at all
      80              :     active_until: std::time::Instant,
      81              :     /// The maximum lag to suppress. Lags above this limit get reported anyway.
      82              :     max_lag: Duration,
      83              : }
      84              : 
      85              : impl WalLagCooldown {
      86            0 :     pub fn new(attach_start: Instant, attach_duration: Duration) -> Self {
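                       :         // Allow roughly three attach durations plus two minutes of slack before
                       :         // lag warnings resume, and suppress lags of up to about twice the attach
                       :         // duration plus one minute in the meantime.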
      87            0 :         Self {
      88            0 :             active_until: attach_start + attach_duration * 3 + Duration::from_secs(120),
      89            0 :             max_lag: attach_duration * 2 + Duration::from_secs(60),
      90            0 :         }
      91            0 :     }
      92              : }
      93              : 
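                       : /// Ingests interpreted WAL records into the key-value storage of a [`Timeline`],
                       : /// keeping an in-memory copy of the latest checkpoint in sync with the repository.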
      94              : pub struct WalIngest {
      95              :     attach_wal_lag_cooldown: Arc<OnceLock<WalLagCooldown>>,
      96              :     shard: ShardIdentity,
      97              :     checkpoint: CheckPoint,
      98              :     checkpoint_modified: bool,
      99              :     warn_ingest_lag: WarnIngestLag,
     100              : }
     101              : 
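                       : /// Rate limiters for the ingest-lag warnings emitted by `warn_on_ingest_lag`,
                       : /// so that a lagging timeline does not flood the logs.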
     102              : struct WarnIngestLag {
     103              :     lag_msg_ratelimit: RateLimit,
     104              :     future_lsn_msg_ratelimit: RateLimit,
     105              :     timestamp_invalid_msg_ratelimit: RateLimit,
     106              : }
     107              : 
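                       : /// Error type for WAL ingestion: a [`WalIngestErrorKind`] plus a backtrace
                       : /// captured when the error was created.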
     108              : pub struct WalIngestError {
     109              :     pub backtrace: std::backtrace::Backtrace,
     110              :     pub kind: WalIngestErrorKind,
     111              : }
     112              : 
     113              : #[derive(thiserror::Error, Debug)]
     114              : pub enum WalIngestErrorKind {
     115              :     #[error(transparent)]
     116              :     #[allow(private_interfaces)]
     117              :     PageReconstructError(#[from] PageReconstructError),
     118              :     #[error(transparent)]
     119              :     DeserializationFailure(#[from] DeserializeError),
     120              :     #[error(transparent)]
     121              :     SerializationFailure(#[from] SerializeError),
     122              :     #[error("the request contains data not supported by pageserver: {0} @ {1}")]
     123              :     InvalidKey(Key, Lsn),
     124              :     #[error("twophase file for xid {0} already exists")]
     125              :     FileAlreadyExists(u64),
     126              :     #[error("slru segment {0:?}/{1} already exists")]
     127              :     SlruAlreadyExists(SlruKind, u32),
     128              :     #[error("relation already exists")]
     129              :     RelationAlreadyExists(RelTag),
     130              :     #[error("invalid reldir key {0}")]
     131              :     InvalidRelDirKey(Key),
     132              : 
     133              :     #[error(transparent)]
     134              :     LogicalError(anyhow::Error),
     135              :     #[error(transparent)]
     136              :     EncodeAuxFileError(anyhow::Error),
     137              :     #[error(transparent)]
     138              :     MaybeRelSizeV2Error(anyhow::Error),
     139              : 
     140              :     #[error("timeline shutting down")]
     141              :     Cancelled,
     142              : }
     143              : 
     144              : impl<T> From<T> for WalIngestError
     145              : where
     146              :     WalIngestErrorKind: From<T>,
     147              : {
     148            0 :     fn from(value: T) -> Self {
     149            0 :         WalIngestError {
     150            0 :             backtrace: Backtrace::capture(),
     151            0 :             kind: WalIngestErrorKind::from(value),
     152            0 :         }
     153            0 :     }
     154              : }
     155              : 
     156              : impl std::error::Error for WalIngestError {
     157            0 :     fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
     158            0 :         self.kind.source()
     159            0 :     }
     160              : }
     161              : 
     162              : impl core::fmt::Display for WalIngestError {
     163            0 :     fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
     164            0 :         self.kind.fmt(f)
     165            0 :     }
     166              : }
     167              : 
     168              : impl core::fmt::Debug for WalIngestError {
     169            0 :     fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
     170            0 :         if f.alternate() {
     171            0 :             f.debug_map()
     172            0 :                 .key(&"backtrace")
     173            0 :                 .value(&self.backtrace)
     174            0 :                 .key(&"kind")
     175            0 :                 .value(&self.kind)
     176            0 :                 .finish()
     177              :         } else {
     178            0 :             writeln!(f, "Error: {:?}", self.kind)?;
     179            0 :             if self.backtrace.status() == std::backtrace::BacktraceStatus::Captured {
     180            0 :                 writeln!(f, "Stack backtrace: {:?}", self.backtrace)?;
     181            0 :             }
     182            0 :             Ok(())
     183              :         }
     184            0 :     }
     185              : }
     186              : 
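                       : /// Like `anyhow::ensure!`, but failed conditions are returned as
                       : /// [`WalIngestErrorKind::LogicalError`].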
     187              : #[macro_export]
     188              : macro_rules! ensure_walingest {
     189              :     ($($t:tt)*) => {
     190       354701 :         _ = || -> Result<(), anyhow::Error> {
     191       354701 :             anyhow::ensure!($($t)*);
     192       354701 :             Ok(())
     193       354701 :         }().map_err(WalIngestErrorKind::LogicalError)?;
     194              :     };
     195              : }
     196              : 
     197              : impl WalIngest {
     198            6 :     pub async fn new(
     199            6 :         timeline: &Timeline,
     200            6 :         startpoint: Lsn,
     201            6 :         ctx: &RequestContext,
     202            6 :     ) -> Result<WalIngest, WalIngestError> {
     203              :         // Fetch the latest checkpoint into memory, so that we can compare with it
     204              :         // quickly in `ingest_record` and update it when it changes.
     205            6 :         let checkpoint_bytes = timeline.get_checkpoint(startpoint, ctx).await?;
     206            6 :         let pgversion = timeline.pg_version;
     207              : 
     208            6 :         let checkpoint = dispatch_pgversion!(pgversion, {
     209            0 :             let checkpoint = pgv::CheckPoint::decode(&checkpoint_bytes)?;
     210            4 :             trace!("CheckPoint.nextXid = {}", checkpoint.nextXid.value);
     211            0 :             <pgv::CheckPoint as Into<CheckPoint>>::into(checkpoint)
     212              :         });
     213              : 
     214            6 :         Ok(WalIngest {
     215            6 :             shard: *timeline.get_shard_identity(),
     216            6 :             checkpoint,
     217            6 :             checkpoint_modified: false,
     218            6 :             attach_wal_lag_cooldown: timeline.attach_wal_lag_cooldown.clone(),
     219            6 :             warn_ingest_lag: WarnIngestLag {
     220            6 :                 lag_msg_ratelimit: RateLimit::new(std::time::Duration::from_secs(10)),
     221            6 :                 future_lsn_msg_ratelimit: RateLimit::new(std::time::Duration::from_secs(10)),
     222            6 :                 timestamp_invalid_msg_ratelimit: RateLimit::new(std::time::Duration::from_secs(10)),
     223            6 :             },
     224            6 :         })
     225            6 :     }
     226              : 
      227              :     /// Ingest an interpreted PostgreSQL WAL record by writing to the underlying
      228              :     /// key-value storage of a given timeline.
      229              :     ///
      230              :     /// This function updates the `lsn` field of the `DatadirModification`.
      231              :     ///
      232              :     /// Returns `true` if the record was ingested, and `false` if it was filtered out.
     233        72926 :     pub async fn ingest_record(
     234        72926 :         &mut self,
     235        72926 :         interpreted: InterpretedWalRecord,
     236        72926 :         modification: &mut DatadirModification<'_>,
     237        72926 :         ctx: &RequestContext,
     238        72926 :     ) -> Result<bool, WalIngestError> {
     239        72926 :         WAL_INGEST.records_received.inc();
     240        72926 :         let prev_len = modification.len();
     241              : 
     242        72926 :         modification.set_lsn(interpreted.next_record_lsn)?;
     243              : 
     244        72926 :         if matches!(interpreted.flush_uncommitted, FlushUncommittedRecords::Yes) {
     245              :             // Records of this type should always be preceded by a commit(), as they
     246              :             // rely on reading data pages back from the Timeline.
     247            0 :             assert!(!modification.has_dirty_data());
     248        72926 :         }
     249              : 
     250        72926 :         assert!(!self.checkpoint_modified);
     251        72926 :         if interpreted.xid != pg_constants::INVALID_TRANSACTION_ID
     252        72917 :             && self.checkpoint.update_next_xid(interpreted.xid)
     253            1 :         {
     254            1 :             self.checkpoint_modified = true;
     255        72925 :         }
     256              : 
     257        72926 :         failpoint_support::sleep_millis_async!("wal-ingest-record-sleep");
     258              : 
     259           33 :         match interpreted.metadata_record {
     260            6 :             Some(MetadataRecord::Heapam(rec)) => match rec {
     261            6 :                 HeapamRecord::ClearVmBits(clear_vm_bits) => {
     262            6 :                     self.ingest_clear_vm_bits(clear_vm_bits, modification, ctx)
     263            6 :                         .await?;
     264              :                 }
     265              :             },
     266            0 :             Some(MetadataRecord::Neonrmgr(rec)) => match rec {
     267            0 :                 NeonrmgrRecord::ClearVmBits(clear_vm_bits) => {
     268            0 :                     self.ingest_clear_vm_bits(clear_vm_bits, modification, ctx)
     269            0 :                         .await?;
     270              :                 }
     271              :             },
     272            8 :             Some(MetadataRecord::Smgr(rec)) => match rec {
     273            8 :                 SmgrRecord::Create(create) => {
     274            8 :                     self.ingest_xlog_smgr_create(create, modification, ctx)
     275            8 :                         .await?;
     276              :                 }
     277            0 :                 SmgrRecord::Truncate(truncate) => {
     278            0 :                     self.ingest_xlog_smgr_truncate(truncate, modification, ctx)
     279            0 :                         .await?;
     280              :                 }
     281              :             },
     282            0 :             Some(MetadataRecord::Dbase(rec)) => match rec {
     283            0 :                 DbaseRecord::Create(create) => {
     284            0 :                     self.ingest_xlog_dbase_create(create, modification, ctx)
     285            0 :                         .await?;
     286              :                 }
     287            0 :                 DbaseRecord::Drop(drop) => {
     288            0 :                     self.ingest_xlog_dbase_drop(drop, modification, ctx).await?;
     289              :                 }
     290              :             },
     291            0 :             Some(MetadataRecord::Clog(rec)) => match rec {
     292            0 :                 ClogRecord::ZeroPage(zero_page) => {
     293            0 :                     self.ingest_clog_zero_page(zero_page, modification, ctx)
     294            0 :                         .await?;
     295              :                 }
     296            0 :                 ClogRecord::Truncate(truncate) => {
     297            0 :                     self.ingest_clog_truncate(truncate, modification, ctx)
     298            0 :                         .await?;
     299              :                 }
     300              :             },
     301            4 :             Some(MetadataRecord::Xact(rec)) => {
     302            4 :                 self.ingest_xact_record(rec, modification, ctx).await?;
     303              :             }
     304            0 :             Some(MetadataRecord::MultiXact(rec)) => match rec {
     305            0 :                 MultiXactRecord::ZeroPage(zero_page) => {
     306            0 :                     self.ingest_multixact_zero_page(zero_page, modification, ctx)
     307            0 :                         .await?;
     308              :                 }
     309            0 :                 MultiXactRecord::Create(create) => {
     310            0 :                     self.ingest_multixact_create(modification, &create)?;
     311              :                 }
     312            0 :                 MultiXactRecord::Truncate(truncate) => {
     313            0 :                     self.ingest_multixact_truncate(modification, &truncate, ctx)
     314            0 :                         .await?;
     315              :                 }
     316              :             },
     317            0 :             Some(MetadataRecord::Relmap(rec)) => match rec {
     318            0 :                 RelmapRecord::Update(update) => {
     319            0 :                     self.ingest_relmap_update(update, modification, ctx).await?;
     320              :                 }
     321              :             },
     322           15 :             Some(MetadataRecord::Xlog(rec)) => match rec {
     323           15 :                 XlogRecord::Raw(raw) => {
     324           15 :                     self.ingest_raw_xlog_record(raw, modification, ctx).await?;
     325              :                 }
     326              :             },
     327            0 :             Some(MetadataRecord::LogicalMessage(rec)) => match rec {
     328            0 :                 LogicalMessageRecord::Put(put) => {
     329            0 :                     self.ingest_logical_message_put(put, modification, ctx)
     330            0 :                         .await?;
     331              :                 }
     332              :                 #[cfg(feature = "testing")]
     333              :                 LogicalMessageRecord::Failpoint => {
     334              :                     // This is a convenient way to make the WAL ingestion pause at
      335              :                     // a particular point in the WAL. For more fine-grained control,
     336              :                     // we could peek into the message and only pause if it contains
     337              :                     // a particular string, for example, but this is enough for now.
     338            0 :                     failpoint_support::sleep_millis_async!(
     339              :                         "pageserver-wal-ingest-logical-message-sleep"
     340              :                     );
     341              :                 }
     342              :             },
     343            0 :             Some(MetadataRecord::Standby(rec)) => {
     344            0 :                 self.ingest_standby_record(rec).unwrap();
     345            0 :             }
     346            0 :             Some(MetadataRecord::Replorigin(rec)) => {
     347            0 :                 self.ingest_replorigin_record(rec, modification).await?;
     348              :             }
     349        72893 :             None => {
     350        72893 :                 // There are two cases through which we end up here:
     351        72893 :                 // 1. The resource manager for the original PG WAL record
     352        72893 :                 //    is [`pg_constants::RM_TBLSPC_ID`]. This is not a supported
     353        72893 :                 //    record type within Neon.
     354        72893 :                 // 2. The resource manager id was unknown to
     355        72893 :                 //    [`wal_decoder::decoder::MetadataRecord::from_decoded`].
     356        72893 :                 // TODO(vlad): Tighten this up more once we build confidence
     357        72893 :                 // that case (2) does not happen in the field.
     358        72893 :             }
     359              :         }
     360              : 
     361        72926 :         modification
     362        72926 :             .ingest_batch(interpreted.batch, &self.shard, ctx)
     363        72926 :             .await?;
     364              : 
     365              :         // If checkpoint data was updated, store the new version in the repository
     366        72926 :         if self.checkpoint_modified {
     367            3 :             let new_checkpoint_bytes = self.checkpoint.encode()?;
     368              : 
     369            3 :             modification.put_checkpoint(new_checkpoint_bytes)?;
     370            3 :             self.checkpoint_modified = false;
     371        72923 :         }
     372              : 
     373              :         // Note that at this point this record is only cached in the modification
     374              :         // until commit() is called to flush the data into the repository and update
     375              :         // the latest LSN.
     376              : 
     377        72926 :         Ok(modification.len() > prev_len)
     378        72926 :     }
     379              : 
     380              :     /// This is the same as AdjustToFullTransactionId(xid) in PostgreSQL
     381            0 :     fn adjust_to_full_transaction_id(&self, xid: TransactionId) -> Result<u64, WalIngestError> {
     382            0 :         let next_full_xid =
     383            0 :             enum_pgversion_dispatch!(&self.checkpoint, CheckPoint, cp, { cp.nextXid.value });
     384              : 
     385            0 :         let next_xid = (next_full_xid) as u32;
     386            0 :         let mut epoch = (next_full_xid >> 32) as u32;
     387              : 
     388            0 :         if xid > next_xid {
     389              :             // Wraparound occurred, must be from a prev epoch.
     390            0 :             if epoch == 0 {
     391            0 :                 Err(WalIngestErrorKind::LogicalError(anyhow::anyhow!(
     392            0 :                     "apparent XID wraparound with prepared transaction XID {xid}, nextXid is {next_full_xid}"
     393            0 :                 )))?;
     394            0 :             }
     395            0 :             epoch -= 1;
     396            0 :         }
     397              : 
     398            0 :         Ok(((epoch as u64) << 32) | xid as u64)
     399            0 :     }
     400              : 
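                       :     /// Subroutine of ingest_record(), to clear visibility map bits for the old
                       :     /// and/or new heap pages referenced by a heapam or neonrmgr record.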
     401            6 :     async fn ingest_clear_vm_bits(
     402            6 :         &mut self,
     403            6 :         clear_vm_bits: ClearVmBits,
     404            6 :         modification: &mut DatadirModification<'_>,
     405            6 :         ctx: &RequestContext,
     406            6 :     ) -> Result<(), WalIngestError> {
     407              :         let ClearVmBits {
     408            6 :             new_heap_blkno,
     409            6 :             old_heap_blkno,
     410            6 :             flags,
     411            6 :             vm_rel,
     412            6 :         } = clear_vm_bits;
     413              :         // Clear the VM bits if required.
     414            6 :         let mut new_vm_blk = new_heap_blkno.map(pg_constants::HEAPBLK_TO_MAPBLOCK);
     415            6 :         let mut old_vm_blk = old_heap_blkno.map(pg_constants::HEAPBLK_TO_MAPBLOCK);
     416              : 
     417              :         // VM bits can only be cleared on the shard(s) owning the VM relation, and must be within
      418              :         // its view of the VM relation size. Out of caution, log an error instead of failing WAL ingestion,
      419              :         // as there have historically been cases where PostgreSQL has cleared spurious VM pages. See:
     420              :         // https://github.com/neondatabase/neon/pull/10634.
     421            6 :         let Some(vm_size) = get_relsize(modification, vm_rel, ctx).await? else {
     422            0 :             critical_timeline!(
     423            0 :                 modification.tline.tenant_shard_id,
     424            0 :                 modification.tline.timeline_id,
     425            0 :                 "clear_vm_bits for unknown VM relation {vm_rel}"
     426              :             );
     427            0 :             return Ok(());
     428              :         };
     429            6 :         if let Some(blknum) = new_vm_blk {
     430            6 :             if blknum >= vm_size {
     431            0 :                 critical_timeline!(
     432            0 :                     modification.tline.tenant_shard_id,
     433            0 :                     modification.tline.timeline_id,
     434            0 :                     "new_vm_blk {blknum} not in {vm_rel} of size {vm_size}"
     435              :                 );
     436            0 :                 new_vm_blk = None;
     437            6 :             }
     438            0 :         }
     439            6 :         if let Some(blknum) = old_vm_blk {
     440            0 :             if blknum >= vm_size {
     441            0 :                 critical_timeline!(
     442            0 :                     modification.tline.tenant_shard_id,
     443            0 :                     modification.tline.timeline_id,
     444            0 :                     "old_vm_blk {blknum} not in {vm_rel} of size {vm_size}"
     445              :                 );
     446            0 :                 old_vm_blk = None;
     447            0 :             }
     448            6 :         }
     449              : 
     450            6 :         if new_vm_blk.is_none() && old_vm_blk.is_none() {
     451            0 :             return Ok(());
     452            6 :         } else if new_vm_blk == old_vm_blk {
      453              :             // An UPDATE record that needs to clear the bits for both the old and the new page, both of
     454              :             // which reside on the same VM page.
     455            0 :             self.put_rel_wal_record(
     456            0 :                 modification,
     457            0 :                 vm_rel,
     458            0 :                 new_vm_blk.unwrap(),
     459            0 :                 NeonWalRecord::ClearVisibilityMapFlags {
     460            0 :                     new_heap_blkno,
     461            0 :                     old_heap_blkno,
     462            0 :                     flags,
     463            0 :                 },
     464            0 :                 ctx,
     465            0 :             )
     466            0 :             .await?;
     467              :         } else {
     468              :             // Clear VM bits for one heap page, or for two pages that reside on different VM pages.
     469            6 :             if let Some(new_vm_blk) = new_vm_blk {
     470            6 :                 self.put_rel_wal_record(
     471            6 :                     modification,
     472            6 :                     vm_rel,
     473            6 :                     new_vm_blk,
     474            6 :                     NeonWalRecord::ClearVisibilityMapFlags {
     475            6 :                         new_heap_blkno,
     476            6 :                         old_heap_blkno: None,
     477            6 :                         flags,
     478            6 :                     },
     479            6 :                     ctx,
     480            6 :                 )
     481            6 :                 .await?;
     482            0 :             }
     483            6 :             if let Some(old_vm_blk) = old_vm_blk {
     484            0 :                 self.put_rel_wal_record(
     485            0 :                     modification,
     486            0 :                     vm_rel,
     487            0 :                     old_vm_blk,
     488            0 :                     NeonWalRecord::ClearVisibilityMapFlags {
     489            0 :                         new_heap_blkno: None,
     490            0 :                         old_heap_blkno,
     491            0 :                         flags,
     492            0 :                     },
     493            0 :                     ctx,
     494            0 :                 )
     495            0 :                 .await?;
     496            6 :             }
     497              :         }
     498            6 :         Ok(())
     499            6 :     }
     500              : 
     501              :     /// Subroutine of ingest_record(), to handle an XLOG_DBASE_CREATE record.
     502            0 :     async fn ingest_xlog_dbase_create(
     503            0 :         &mut self,
     504            0 :         create: DbaseCreate,
     505            0 :         modification: &mut DatadirModification<'_>,
     506            0 :         ctx: &RequestContext,
     507            0 :     ) -> Result<(), WalIngestError> {
     508              :         let DbaseCreate {
     509            0 :             db_id,
     510            0 :             tablespace_id,
     511            0 :             src_db_id,
     512            0 :             src_tablespace_id,
     513            0 :         } = create;
     514              : 
     515            0 :         let rels = modification
     516            0 :             .tline
     517            0 :             .list_rels(
     518            0 :                 src_tablespace_id,
     519            0 :                 src_db_id,
     520            0 :                 Version::Modified(modification),
     521            0 :                 ctx,
     522            0 :             )
     523            0 :             .await?;
     524              : 
     525            0 :         debug!("ingest_xlog_dbase_create: {} rels", rels.len());
     526              : 
     527              :         // Copy relfilemap
     528            0 :         let filemap = modification
     529            0 :             .tline
     530            0 :             .get_relmap_file(
     531            0 :                 src_tablespace_id,
     532            0 :                 src_db_id,
     533            0 :                 Version::Modified(modification),
     534            0 :                 ctx,
     535            0 :             )
     536            0 :             .await?;
     537            0 :         modification
     538            0 :             .put_relmap_file(tablespace_id, db_id, filemap, ctx)
     539            0 :             .await?;
     540              : 
     541            0 :         let mut num_rels_copied = 0;
     542            0 :         let mut num_blocks_copied = 0;
     543            0 :         for src_rel in rels {
     544            0 :             assert_eq!(src_rel.spcnode, src_tablespace_id);
     545            0 :             assert_eq!(src_rel.dbnode, src_db_id);
     546              : 
     547            0 :             let nblocks = modification
     548            0 :                 .tline
     549            0 :                 .get_rel_size(src_rel, Version::Modified(modification), ctx)
     550            0 :                 .await?;
     551            0 :             let dst_rel = RelTag {
     552            0 :                 spcnode: tablespace_id,
     553            0 :                 dbnode: db_id,
     554            0 :                 relnode: src_rel.relnode,
     555            0 :                 forknum: src_rel.forknum,
     556            0 :             };
     557              : 
     558            0 :             modification.put_rel_creation(dst_rel, nblocks, ctx).await?;
     559              : 
     560              :             // Copy content
     561            0 :             debug!("copying rel {} to {}, {} blocks", src_rel, dst_rel, nblocks);
     562            0 :             for blknum in 0..nblocks {
     563              :                 // Sharding:
     564              :                 //  - src and dst are always on the same shard, because they differ only by dbNode, and
     565              :                 //    dbNode is not included in the hash inputs for sharding.
     566              :                 //  - This WAL command is replayed on all shards, but each shard only copies the blocks
     567              :                 //    that belong to it.
     568            0 :                 let src_key = rel_block_to_key(src_rel, blknum);
     569            0 :                 if !self.shard.is_key_local(&src_key) {
     570            0 :                     debug!(
     571            0 :                         "Skipping non-local key {} during XLOG_DBASE_CREATE",
     572              :                         src_key
     573              :                     );
     574            0 :                     continue;
     575            0 :                 }
     576            0 :                 debug!(
     577            0 :                     "copying block {} from {} ({}) to {}",
     578              :                     blknum, src_rel, src_key, dst_rel
     579              :                 );
     580              : 
     581            0 :                 let content = modification
     582            0 :                     .tline
     583            0 :                     .get_rel_page_at_lsn(
     584            0 :                         src_rel,
     585            0 :                         blknum,
     586            0 :                         Version::Modified(modification),
     587            0 :                         ctx,
     588            0 :                         crate::tenant::storage_layer::IoConcurrency::sequential(),
     589            0 :                     )
     590            0 :                     .await?;
     591            0 :                 modification.put_rel_page_image(dst_rel, blknum, content)?;
     592            0 :                 num_blocks_copied += 1;
     593              :             }
     594              : 
     595            0 :             num_rels_copied += 1;
     596              :         }
     597              : 
     598            0 :         info!(
     599            0 :             "Created database {}/{}, copied {} blocks in {} rels",
     600              :             tablespace_id, db_id, num_blocks_copied, num_rels_copied
     601              :         );
     602            0 :         Ok(())
     603            0 :     }
     604              : 
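                       :     /// Subroutine of ingest_record(), to handle an XLOG_DBASE_DROP record by
                       :     /// dropping the database directory for each tablespace it lists.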
     605            0 :     async fn ingest_xlog_dbase_drop(
     606            0 :         &mut self,
     607            0 :         dbase_drop: DbaseDrop,
     608            0 :         modification: &mut DatadirModification<'_>,
     609            0 :         ctx: &RequestContext,
     610            0 :     ) -> Result<(), WalIngestError> {
     611              :         let DbaseDrop {
     612            0 :             db_id,
     613            0 :             tablespace_ids,
     614            0 :         } = dbase_drop;
     615            0 :         for tablespace_id in tablespace_ids {
     616            0 :             trace!("Drop db {}, {}", tablespace_id, db_id);
     617            0 :             modification.drop_dbdir(tablespace_id, db_id, ctx).await?;
     618              :         }
     619              : 
     620            0 :         Ok(())
     621            0 :     }
     622              : 
     623            8 :     async fn ingest_xlog_smgr_create(
     624            8 :         &mut self,
     625            8 :         create: SmgrCreate,
     626            8 :         modification: &mut DatadirModification<'_>,
     627            8 :         ctx: &RequestContext,
     628            8 :     ) -> Result<(), WalIngestError> {
     629            8 :         let SmgrCreate { rel } = create;
     630            8 :         self.put_rel_creation(modification, rel, ctx).await?;
     631            8 :         Ok(())
     632            8 :     }
     633              : 
     634              :     /// Subroutine of ingest_record(), to handle an XLOG_SMGR_TRUNCATE record.
     635              :     ///
     636              :     /// This is the same logic as in PostgreSQL's smgr_redo() function.
     637            0 :     async fn ingest_xlog_smgr_truncate(
     638            0 :         &mut self,
     639            0 :         truncate: XlSmgrTruncate,
     640            0 :         modification: &mut DatadirModification<'_>,
     641            0 :         ctx: &RequestContext,
     642            0 :     ) -> Result<(), WalIngestError> {
     643              :         let XlSmgrTruncate {
     644            0 :             blkno,
     645            0 :             rnode,
     646            0 :             flags,
     647            0 :         } = truncate;
     648              : 
     649            0 :         let spcnode = rnode.spcnode;
     650            0 :         let dbnode = rnode.dbnode;
     651            0 :         let relnode = rnode.relnode;
     652              : 
     653            0 :         if flags & pg_constants::SMGR_TRUNCATE_HEAP != 0 {
     654            0 :             let rel = RelTag {
     655            0 :                 spcnode,
     656            0 :                 dbnode,
     657            0 :                 relnode,
     658            0 :                 forknum: MAIN_FORKNUM,
     659            0 :             };
     660              : 
     661            0 :             self.put_rel_truncation(modification, rel, blkno, ctx)
     662            0 :                 .await?;
     663            0 :         }
     664            0 :         if flags & pg_constants::SMGR_TRUNCATE_FSM != 0 {
     665            0 :             let rel = RelTag {
     666            0 :                 spcnode,
     667            0 :                 dbnode,
     668            0 :                 relnode,
     669            0 :                 forknum: FSM_FORKNUM,
     670            0 :             };
     671              : 
     672              :             // Zero out the last remaining FSM page, if this shard owns it. We are not precise here,
      673              :             // and instead of digging into the FSM bitmap format we just clear the whole page.
     674            0 :             let fsm_logical_page_no = blkno / pg_constants::SLOTS_PER_FSM_PAGE;
     675            0 :             let mut fsm_physical_page_no = fsm_logical_to_physical(fsm_logical_page_no);
     676            0 :             if blkno % pg_constants::SLOTS_PER_FSM_PAGE != 0
     677            0 :                 && self
     678            0 :                     .shard
     679            0 :                     .is_key_local(&rel_block_to_key(rel, fsm_physical_page_no))
     680              :             {
     681            0 :                 modification.put_rel_page_image_zero(rel, fsm_physical_page_no)?;
     682            0 :                 fsm_physical_page_no += 1;
     683            0 :             }
     684              :             // Truncate this shard's view of the FSM relation size, if it even has one.
     685            0 :             let nblocks = get_relsize(modification, rel, ctx).await?.unwrap_or(0);
     686            0 :             if nblocks > fsm_physical_page_no {
     687            0 :                 self.put_rel_truncation(modification, rel, fsm_physical_page_no, ctx)
     688            0 :                     .await?;
     689            0 :             }
     690            0 :         }
     691            0 :         if flags & pg_constants::SMGR_TRUNCATE_VM != 0 {
     692            0 :             let rel = RelTag {
     693            0 :                 spcnode,
     694            0 :                 dbnode,
     695            0 :                 relnode,
     696            0 :                 forknum: VISIBILITYMAP_FORKNUM,
     697            0 :             };
     698              : 
     699              :             // last remaining block, byte, and bit
     700            0 :             let mut vm_page_no = blkno / (pg_constants::VM_HEAPBLOCKS_PER_PAGE as u32);
     701            0 :             let trunc_byte = blkno as usize % pg_constants::VM_HEAPBLOCKS_PER_PAGE
     702            0 :                 / pg_constants::VM_HEAPBLOCKS_PER_BYTE;
     703            0 :             let trunc_offs = blkno as usize % pg_constants::VM_HEAPBLOCKS_PER_BYTE
     704            0 :                 * pg_constants::VM_BITS_PER_HEAPBLOCK;
     705              : 
     706              :             // Unless the new size is exactly at a visibility map page boundary, the
     707              :             // tail bits in the last remaining map page, representing truncated heap
     708              :             // blocks, need to be cleared. This is not only tidy, but also necessary
     709              :             // because we don't get a chance to clear the bits if the heap is extended
     710              :             // again. Only do this on the shard that owns the page.
     711            0 :             if (trunc_byte != 0 || trunc_offs != 0)
     712            0 :                 && self.shard.is_key_local(&rel_block_to_key(rel, vm_page_no))
     713              :             {
     714            0 :                 modification.put_rel_wal_record(
     715            0 :                     rel,
     716            0 :                     vm_page_no,
     717            0 :                     NeonWalRecord::TruncateVisibilityMap {
     718            0 :                         trunc_byte,
     719            0 :                         trunc_offs,
     720            0 :                     },
     721            0 :                 )?;
     722            0 :                 vm_page_no += 1;
     723            0 :             }
     724              :             // Truncate this shard's view of the VM relation size, if it even has one.
     725            0 :             let nblocks = get_relsize(modification, rel, ctx).await?.unwrap_or(0);
     726            0 :             if nblocks > vm_page_no {
     727            0 :                 self.put_rel_truncation(modification, rel, vm_page_no, ctx)
     728            0 :                     .await?;
     729            0 :             }
     730            0 :         }
     731            0 :         Ok(())
     732            0 :     }
     733              : 
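                       :     /// Warn (rate-limited) if the record's timestamp lags the wall clock by more
                       :     /// than `wait_lsn_timeout`, lies in the future, or cannot be parsed at all.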
     734            4 :     fn warn_on_ingest_lag(
     735            4 :         &mut self,
     736            4 :         conf: &crate::config::PageServerConf,
     737            4 :         wal_timestamp: TimestampTz,
     738            4 :     ) {
     739            4 :         debug_assert_current_span_has_tenant_and_timeline_id();
     740            4 :         let now = SystemTime::now();
     741            4 :         let rate_limits = &mut self.warn_ingest_lag;
     742              : 
     743            4 :         let ts = enum_pgversion_dispatch!(&self.checkpoint, CheckPoint, _cp, {
     744            0 :             pgv::xlog_utils::try_from_pg_timestamp(wal_timestamp)
     745              :         });
     746              : 
     747            4 :         match ts {
     748            4 :             Ok(ts) => {
     749            4 :                 match now.duration_since(ts) {
     750            4 :                     Ok(lag) => {
     751            4 :                         if lag > conf.wait_lsn_timeout {
     752            4 :                             rate_limits.lag_msg_ratelimit.call2(|rate_limit_stats| {
     753            1 :                                 if let Some(cooldown) = self.attach_wal_lag_cooldown.get() {
     754            0 :                                     if std::time::Instant::now() < cooldown.active_until && lag <= cooldown.max_lag {
     755            0 :                                         return;
     756            0 :                                     }
     757            1 :                                 } else {
     758            1 :                                     // Still loading? We shouldn't be here
     759            1 :                                 }
     760            1 :                                 let lag = humantime::format_duration(lag);
     761            1 :                                 warn!(%rate_limit_stats, %lag, "ingesting record with timestamp lagging more than wait_lsn_timeout");
     762            1 :                             })
     763            0 :                         }
     764              :                     }
     765            0 :                     Err(e) => {
     766            0 :                         let delta_t = e.duration();
     767              :                         // determined by prod victoriametrics query: 1000 * (timestamp(node_time_seconds{neon_service="pageserver"}) - node_time_seconds)
     768              :                         // => https://www.robustperception.io/time-metric-from-the-node-exporter/
     769              :                         const IGNORED_DRIFT: Duration = Duration::from_millis(100);
     770            0 :                         if delta_t > IGNORED_DRIFT {
     771            0 :                             let delta_t = humantime::format_duration(delta_t);
     772            0 :                             rate_limits.future_lsn_msg_ratelimit.call2(|rate_limit_stats| {
     773            0 :                                 warn!(%rate_limit_stats, %delta_t, "ingesting record with timestamp from future");
     774            0 :                             })
     775            0 :                         }
     776              :                     }
     777              :                 };
     778              :             }
     779            0 :             Err(error) => {
     780            0 :                 rate_limits.timestamp_invalid_msg_ratelimit.call2(|rate_limit_stats| {
     781            0 :                     warn!(%rate_limit_stats, %error, "ingesting record with invalid timestamp, cannot calculate lag and will fail find-lsn-for-timestamp type queries");
     782            0 :                 })
     783              :             }
     784              :         }
     785            4 :     }
     786              : 
      787              :     /// Subroutine of ingest_record(), to handle XLOG_XACT_* records.
     788              :     ///
     789            4 :     async fn ingest_xact_record(
     790            4 :         &mut self,
     791            4 :         record: XactRecord,
     792            4 :         modification: &mut DatadirModification<'_>,
     793            4 :         ctx: &RequestContext,
     794            4 :     ) -> Result<(), WalIngestError> {
     795            4 :         let (xact_common, is_commit, is_prepared) = match record {
     796            0 :             XactRecord::Prepare(XactPrepare { xl_xid, data }) => {
     797            0 :                 let xid: u64 = if modification.tline.pg_version >= PgMajorVersion::PG17 {
     798            0 :                     self.adjust_to_full_transaction_id(xl_xid)?
     799              :                 } else {
     800            0 :                     xl_xid as u64
     801              :                 };
     802            0 :                 return modification.put_twophase_file(xid, data, ctx).await;
     803              :             }
     804            4 :             XactRecord::Commit(common) => (common, true, false),
     805            0 :             XactRecord::Abort(common) => (common, false, false),
     806            0 :             XactRecord::CommitPrepared(common) => (common, true, true),
     807            0 :             XactRecord::AbortPrepared(common) => (common, false, true),
     808              :         };
     809              : 
     810              :         let XactCommon {
     811            4 :             parsed,
     812            4 :             origin_id,
     813            4 :             xl_xid,
     814            4 :             lsn,
     815            4 :         } = xact_common;
     816              : 
     817              :         // Record update of CLOG pages
     818            4 :         let mut pageno = parsed.xid / pg_constants::CLOG_XACTS_PER_PAGE;
     819            4 :         let mut segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
     820            4 :         let mut rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
     821            4 :         let mut page_xids: Vec<TransactionId> = vec![parsed.xid];
     822              : 
     823            4 :         self.warn_on_ingest_lag(modification.tline.conf, parsed.xact_time);
     824              : 
     825            4 :         for subxact in &parsed.subxacts {
     826            0 :             let subxact_pageno = subxact / pg_constants::CLOG_XACTS_PER_PAGE;
     827            0 :             if subxact_pageno != pageno {
      828              :                 // This subxact goes to a different page. Write the record
     829              :                 // for all the XIDs on the previous page, and continue
     830              :                 // accumulating XIDs on this new page.
     831            0 :                 modification.put_slru_wal_record(
     832            0 :                     SlruKind::Clog,
     833            0 :                     segno,
     834            0 :                     rpageno,
     835            0 :                     if is_commit {
     836            0 :                         NeonWalRecord::ClogSetCommitted {
     837            0 :                             xids: page_xids,
     838            0 :                             timestamp: parsed.xact_time,
     839            0 :                         }
     840              :                     } else {
     841            0 :                         NeonWalRecord::ClogSetAborted { xids: page_xids }
     842              :                     },
     843            0 :                 )?;
     844            0 :                 page_xids = Vec::new();
     845            0 :             }
     846            0 :             pageno = subxact_pageno;
     847            0 :             segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
     848            0 :             rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
     849            0 :             page_xids.push(*subxact);
     850              :         }
     851            4 :         modification.put_slru_wal_record(
     852            4 :             SlruKind::Clog,
     853            4 :             segno,
     854            4 :             rpageno,
     855            4 :             if is_commit {
     856            4 :                 NeonWalRecord::ClogSetCommitted {
     857            4 :                     xids: page_xids,
     858            4 :                     timestamp: parsed.xact_time,
     859            4 :                 }
     860              :             } else {
     861            0 :                 NeonWalRecord::ClogSetAborted { xids: page_xids }
     862              :             },
     863            0 :         )?;
     864              : 
     865              :         // Group relations to drop by dbNode.  This map will contain all relations that _might_
      866              :         // exist; we will later reduce it to the ones that really exist.  This map can be huge if
     867              :         // the transaction touches a huge number of relations (there is no bound on this in
     868              :         // postgres).
     869            4 :         let mut drop_relations: HashMap<(u32, u32), Vec<RelTag>> = HashMap::new();
     870              : 
     871            4 :         for xnode in &parsed.xnodes {
     872            0 :             for forknum in MAIN_FORKNUM..=INIT_FORKNUM {
     873            0 :                 let rel = RelTag {
     874            0 :                     forknum,
     875            0 :                     spcnode: xnode.spcnode,
     876            0 :                     dbnode: xnode.dbnode,
     877            0 :                     relnode: xnode.relnode,
     878            0 :                 };
     879            0 :                 drop_relations
     880            0 :                     .entry((xnode.spcnode, xnode.dbnode))
     881            0 :                     .or_default()
     882            0 :                     .push(rel);
     883            0 :             }
     884              :         }
     885              : 
     886              :         // Execute relation drops in a batch: the number may be huge, so deleting individually is prohibitively expensive
     887            4 :         modification.put_rel_drops(drop_relations, ctx).await?;
     888              : 
     889            4 :         if origin_id != 0 {
     890            0 :             modification
     891            0 :                 .set_replorigin(origin_id, parsed.origin_lsn)
     892            0 :                 .await?;
     893            4 :         }
     894              : 
     895            4 :         if is_prepared {
     896              :             // Remove twophase file. see RemoveTwoPhaseFile() in postgres code
     897            0 :             trace!(
     898            0 :                 "Drop twophaseFile for xid {} parsed_xact.xid {} here at {}",
     899              :                 xl_xid, parsed.xid, lsn,
     900              :             );
     901              : 
     902            0 :             let xid: u64 = if modification.tline.pg_version >= PgMajorVersion::PG17 {
     903            0 :                 self.adjust_to_full_transaction_id(parsed.xid)?
     904              :             } else {
     905            0 :                 parsed.xid as u64
     906              :             };
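                      :             // For illustration: a 64-bit "full" transaction id packs the
                      :             // wraparound epoch into the high 32 bits, roughly (a sketch of
                      :             // the layout only, not of the helper above, which has to derive
                      :             // the epoch from the tracked nextXid):
                      :             //
                      :             //     let full_xid = ((epoch as u64) << 32) | (xid as u64);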
     907            0 :             modification.drop_twophase_file(xid, ctx).await?;
     908            4 :         }
     909              : 
     910            4 :         Ok(())
     911            4 :     }
     912              : 
     913            0 :     async fn ingest_clog_truncate(
     914            0 :         &mut self,
     915            0 :         truncate: ClogTruncate,
     916            0 :         modification: &mut DatadirModification<'_>,
     917            0 :         ctx: &RequestContext,
     918            0 :     ) -> Result<(), WalIngestError> {
     919              :         let ClogTruncate {
     920            0 :             pageno,
     921            0 :             oldest_xid,
     922            0 :             oldest_xid_db,
     923            0 :         } = truncate;
     924              : 
     925            0 :         info!(
     926            0 :             "RM_CLOG_ID truncate pageno {} oldestXid {} oldestXidDB {}",
     927              :             pageno, oldest_xid, oldest_xid_db
     928              :         );
     929              : 
     930              :         // In Postgres, oldestXid and oldestXidDB are updated in memory when the CLOG is
     931              :         // truncated, but a checkpoint record with the updated values isn't written until
     932              :         // later. In Neon, a server can start at any LSN, not just on a checkpoint record,
     933              :         // so we keep the oldestXid and oldestXidDB up-to-date.
     934            0 :         enum_pgversion_dispatch!(&mut self.checkpoint, CheckPoint, cp, {
     935            0 :             cp.oldestXid = oldest_xid;
     936            0 :             cp.oldestXidDB = oldest_xid_db;
     937            0 :         });
     938            0 :         self.checkpoint_modified = true;
     939              : 
     940              :         // TODO: handle AdvanceOldestClogXid(), or write a comment explaining why we don't need it
     941              : 
     942            0 :         let latest_page_number =
     943            0 :             enum_pgversion_dispatch!(self.checkpoint, CheckPoint, cp, { cp.nextXid.value }) as u32
     944              :                 / pg_constants::CLOG_XACTS_PER_PAGE;
     945              : 
     946              :         // Now delete all segments containing pages between 'pageno'
     947              :         // and latest_page_number.
     948              : 
     949              :         // First, make an important safety check:
     950              :         // the current endpoint page must not be eligible for removal.
     951              :         // See SimpleLruTruncate() in slru.c
     952            0 :         if dispatch_pgversion!(modification.tline.pg_version, {
     953            0 :             pgv::nonrelfile_utils::clogpage_precedes(latest_page_number, pageno)
     954              :         }) {
     955            0 :             info!("could not truncate directory pg_xact apparent wraparound");
     956            0 :             return Ok(());
     957            0 :         }
     958              : 
     959              :         // Iterate over the SLRU CLOG segments and drop those that we're ready to truncate
     960              :         //
     961              :         // We cannot pass 'lsn' to Timeline::list_slru_segments(), or it
     962              :         // will block waiting for the last valid LSN to advance up to
     963              :         // it. So we use the previous record's LSN in the get calls
     964              :         // instead.
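                      :         //
                      :         // For illustration, ignoring wraparound: with a truncation cutoff of
                      :         // pageno = 100, segment 2 (pages 64..=95) lies entirely before the
                      :         // cutoff and may be dropped, while segment 3 (pages 96..=127) still
                      :         // contains the cutoff page and is kept. A rough sketch of the predicate:
                      :         //
                      :         //     let cutoff = 100u32;
                      :         //     let segpage = 3 * 32; // first page of segment 3
                      :         //     let may_delete = segpage + 32 <= cutoff; // false -> keep segment 3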
     965            0 :         if modification.tline.get_shard_identity().is_shard_zero() {
     966            0 :             for segno in modification
     967            0 :                 .tline
     968            0 :                 .list_slru_segments(SlruKind::Clog, Version::Modified(modification), ctx)
     969            0 :                 .await?
     970              :             {
     971            0 :                 let segpage = segno * pg_constants::SLRU_PAGES_PER_SEGMENT;
     972              : 
     973            0 :                 let may_delete = dispatch_pgversion!(modification.tline.pg_version, {
     974            0 :                     pgv::nonrelfile_utils::slru_may_delete_clogsegment(segpage, pageno)
     975              :                 });
     976              : 
     977            0 :                 if may_delete {
     978            0 :                     modification
     979            0 :                         .drop_slru_segment(SlruKind::Clog, segno, ctx)
     980            0 :                         .await?;
     981            0 :                     trace!("Drop CLOG segment {:>04X}", segno);
     982            0 :                 }
     983              :             }
     984            0 :         }
     985              : 
     986            0 :         Ok(())
     987            0 :     }
     988              : 
     989            0 :     async fn ingest_clog_zero_page(
     990            0 :         &mut self,
     991            0 :         zero_page: ClogZeroPage,
     992            0 :         modification: &mut DatadirModification<'_>,
     993            0 :         ctx: &RequestContext,
     994            0 :     ) -> Result<(), WalIngestError> {
     995            0 :         let ClogZeroPage { segno, rpageno } = zero_page;
     996              : 
     997            0 :         self.put_slru_page_image(
     998            0 :             modification,
     999            0 :             SlruKind::Clog,
    1000            0 :             segno,
    1001            0 :             rpageno,
    1002            0 :             ZERO_PAGE.clone(),
    1003            0 :             ctx,
    1004            0 :         )
    1005            0 :         .await
    1006            0 :     }
    1007              : 
    1008            0 :     fn ingest_multixact_create(
    1009            0 :         &mut self,
    1010            0 :         modification: &mut DatadirModification,
    1011            0 :         xlrec: &XlMultiXactCreate,
    1012            0 :     ) -> Result<(), WalIngestError> {
    1013              :         // Create WAL record for updating the multixact-offsets page
    1014            0 :         let pageno = xlrec.mid / pg_constants::MULTIXACT_OFFSETS_PER_PAGE as u32;
    1015            0 :         let segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
    1016            0 :         let rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
    1017              : 
    1018            0 :         modification.put_slru_wal_record(
    1019            0 :             SlruKind::MultiXactOffsets,
    1020            0 :             segno,
    1021            0 :             rpageno,
    1022            0 :             NeonWalRecord::MultixactOffsetCreate {
    1023            0 :                 mid: xlrec.mid,
    1024            0 :                 moff: xlrec.moff,
    1025            0 :             },
    1026            0 :         )?;
    1027              : 
    1028              :         // Create WAL records for the update of each affected multixact-members page
    1029            0 :         let mut members = xlrec.members.iter();
    1030            0 :         let mut offset = xlrec.moff;
    1031              :         loop {
    1032            0 :             let pageno = offset / pg_constants::MULTIXACT_MEMBERS_PER_PAGE as u32;
    1033              : 
    1034              :             // How many members fit on this page?
    1035            0 :             let page_remain = pg_constants::MULTIXACT_MEMBERS_PER_PAGE as u32
    1036            0 :                 - offset % pg_constants::MULTIXACT_MEMBERS_PER_PAGE as u32;
    1037              : 
    1038            0 :             let mut this_page_members: Vec<MultiXactMember> = Vec::new();
    1039            0 :             for _ in 0..page_remain {
    1040            0 :                 if let Some(m) = members.next() {
    1041            0 :                     this_page_members.push(m.clone());
    1042            0 :                 } else {
    1043            0 :                     break;
    1044              :                 }
    1045              :             }
    1046            0 :             if this_page_members.is_empty() {
    1047              :                 // all done
    1048            0 :                 break;
    1049            0 :             }
    1050            0 :             let n_this_page = this_page_members.len();
    1051              : 
    1052            0 :             modification.put_slru_wal_record(
    1053            0 :                 SlruKind::MultiXactMembers,
    1054            0 :                 pageno / pg_constants::SLRU_PAGES_PER_SEGMENT,
    1055            0 :                 pageno % pg_constants::SLRU_PAGES_PER_SEGMENT,
    1056            0 :                 NeonWalRecord::MultixactMembersCreate {
    1057            0 :                     moff: offset,
    1058            0 :                     members: this_page_members,
    1059            0 :                 },
    1060            0 :             )?;
    1061              : 
    1062              :             // Note: The multixact members can wrap around, even within one WAL record.
    1063            0 :             offset = offset.wrapping_add(n_this_page as u32);
    1064              :         }
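                      :         // For illustration, assuming the usual value of
                      :         // MULTIXACT_MEMBERS_PER_PAGE = 1636 for 8 KB pages: a record with
                      :         // moff = 1630 and 10 members writes 6 members to the first page and
                      :         // spills the remaining 4 onto the next one:
                      :         //
                      :         //     let offset = 1630u32;
                      :         //     let page_remain = 1636 - offset % 1636; // 6
                      :         //     let offset = offset.wrapping_add(6);    // 1636: start of next page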
    1065            0 :         let next_offset = offset;
    1066            0 :         assert!(xlrec.moff.wrapping_add(xlrec.nmembers) == next_offset);
    1067              : 
    1068              :         // Update next-multi-xid and next-offset
    1069              :         //
    1070              :         // NB: In PostgreSQL, the next-multi-xid stored in the control file is allowed to
    1071              :         // go to 0, and it's fixed up by skipping to FirstMultiXactId in functions that
    1072              :         // read it, like GetNewMultiXactId(). This is different from how nextXid is
    1073              :         // incremented! nextXid skips over < FirstNormalTransactionId when the value
    1074              :         // is stored, so it's never 0 in a checkpoint.
    1075              :         //
    1076              :         // I don't know why it's done that way; it seems less error-prone to skip over 0
    1077              :         // when the value is stored rather than when it's read. But let's do it the same
    1078              :         // way here.
    1079            0 :         let next_multi_xid = xlrec.mid.wrapping_add(1);
    1080              : 
    1081            0 :         if self
    1082            0 :             .checkpoint
    1083            0 :             .update_next_multixid(next_multi_xid, next_offset)
    1084            0 :         {
    1085            0 :             self.checkpoint_modified = true;
    1086            0 :         }
    1087              : 
    1088              :         // Also update the next-xid with the highest member. According to the comments in
    1089              :         // multixact_redo(), this shouldn't be necessary, but let's do the same here.
    1090            0 :         let max_mbr_xid = xlrec.members.iter().fold(None, |acc, mbr| {
    1091            0 :             if let Some(max_xid) = acc {
    1092            0 :                 if mbr.xid.wrapping_sub(max_xid) as i32 > 0 {
    1093            0 :                     Some(mbr.xid)
    1094              :                 } else {
    1095            0 :                     acc
    1096              :                 }
    1097              :             } else {
    1098            0 :                 Some(mbr.xid)
    1099              :             }
    1100            0 :         });
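                      :         // The `wrapping_sub(..) as i32 > 0` test above is the usual circular
                      :         // XID comparison: it treats the 32-bit XID space as a ring, so an XID
                      :         // that has recently wrapped past u32::MAX still compares as newer. For
                      :         // illustration:
                      :         //
                      :         //     let a: u32 = 10;           // logically newer, post-wraparound
                      :         //     let b: u32 = u32::MAX - 5;
                      :         //     assert!((a.wrapping_sub(b) as i32) > 0);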
    1101              : 
    1102            0 :         if let Some(max_xid) = max_mbr_xid {
    1103            0 :             if self.checkpoint.update_next_xid(max_xid) {
    1104            0 :                 self.checkpoint_modified = true;
    1105            0 :             }
    1106            0 :         }
    1107            0 :         Ok(())
    1108            0 :     }
    1109              : 
    1110            0 :     async fn ingest_multixact_truncate(
    1111            0 :         &mut self,
    1112            0 :         modification: &mut DatadirModification<'_>,
    1113            0 :         xlrec: &XlMultiXactTruncate,
    1114            0 :         ctx: &RequestContext,
    1115            0 :     ) -> Result<(), WalIngestError> {
    1116            0 :         let (maxsegment, startsegment, endsegment) =
    1117            0 :             enum_pgversion_dispatch!(&mut self.checkpoint, CheckPoint, cp, {
    1118            0 :                 cp.oldestMulti = xlrec.end_trunc_off;
    1119            0 :                 cp.oldestMultiDB = xlrec.oldest_multi_db;
    1120            0 :                 let maxsegment: i32 = pgv::nonrelfile_utils::mx_offset_to_member_segment(
    1121              :                     pg_constants::MAX_MULTIXACT_OFFSET,
    1122              :                 );
    1123            0 :                 let startsegment: i32 =
    1124            0 :                     pgv::nonrelfile_utils::mx_offset_to_member_segment(xlrec.start_trunc_memb);
    1125            0 :                 let endsegment: i32 =
    1126            0 :                     pgv::nonrelfile_utils::mx_offset_to_member_segment(xlrec.end_trunc_memb);
    1127            0 :                 (maxsegment, startsegment, endsegment)
    1128              :             });
    1129              : 
    1130            0 :         self.checkpoint_modified = true;
    1131              : 
    1132              :         // PerformMembersTruncation
    1133            0 :         let mut segment: i32 = startsegment;
    1134              : 
    1135              :         // Delete all the segments except the last one. The last segment may still
    1136              :         // contain valid data, possibly only partially.
    1137            0 :         if modification.tline.get_shard_identity().is_shard_zero() {
    1138            0 :             while segment != endsegment {
    1139            0 :                 modification
    1140            0 :                     .drop_slru_segment(SlruKind::MultiXactMembers, segment as u32, ctx)
    1141            0 :                     .await?;
    1142              : 
    1143              :                 /* move to next segment, handling wraparound correctly */
    1144            0 :                 if segment == maxsegment {
    1145            0 :                     segment = 0;
    1146            0 :                 } else {
    1147            0 :                     segment += 1;
    1148            0 :                 }
    1149              :             }
    1150            0 :         }
    1151              : 
    1152              :         // Truncate offsets
    1153              :         // FIXME: this does not handle wraparound correctly
    1154              : 
    1155            0 :         Ok(())
    1156            0 :     }
    1157              : 
    1158            0 :     async fn ingest_multixact_zero_page(
    1159            0 :         &mut self,
    1160            0 :         zero_page: MultiXactZeroPage,
    1161            0 :         modification: &mut DatadirModification<'_>,
    1162            0 :         ctx: &RequestContext,
    1163            0 :     ) -> Result<(), WalIngestError> {
    1164              :         let MultiXactZeroPage {
    1165            0 :             slru_kind,
    1166            0 :             segno,
    1167            0 :             rpageno,
    1168            0 :         } = zero_page;
    1169            0 :         self.put_slru_page_image(
    1170            0 :             modification,
    1171            0 :             slru_kind,
    1172            0 :             segno,
    1173            0 :             rpageno,
    1174            0 :             ZERO_PAGE.clone(),
    1175            0 :             ctx,
    1176            0 :         )
    1177            0 :         .await
    1178            0 :     }
    1179              : 
    1180            0 :     async fn ingest_relmap_update(
    1181            0 :         &mut self,
    1182            0 :         update: RelmapUpdate,
    1183            0 :         modification: &mut DatadirModification<'_>,
    1184            0 :         ctx: &RequestContext,
    1185            0 :     ) -> Result<(), WalIngestError> {
    1186            0 :         let RelmapUpdate { update, buf } = update;
    1187              : 
    1188            0 :         modification
    1189            0 :             .put_relmap_file(update.tsid, update.dbid, buf, ctx)
    1190            0 :             .await
    1191            0 :     }
    1192              : 
    1193           15 :     async fn ingest_raw_xlog_record(
    1194           15 :         &mut self,
    1195           15 :         raw_record: RawXlogRecord,
    1196           15 :         modification: &mut DatadirModification<'_>,
    1197           15 :         ctx: &RequestContext,
    1198           15 :     ) -> Result<(), WalIngestError> {
    1199           15 :         let RawXlogRecord { info, lsn, mut buf } = raw_record;
    1200           15 :         let pg_version = modification.tline.pg_version;
    1201              : 
    1202           15 :         if info == pg_constants::XLOG_PARAMETER_CHANGE {
    1203            1 :             if let CheckPoint::V17(cp) = &mut self.checkpoint {
    1204            0 :                 let rec = v17::XlParameterChange::decode(&mut buf);
    1205            0 :                 cp.wal_level = rec.wal_level;
    1206            0 :                 self.checkpoint_modified = true;
    1207            1 :             }
    1208           14 :         } else if info == pg_constants::XLOG_END_OF_RECOVERY {
    1209            0 :             if let CheckPoint::V17(cp) = &mut self.checkpoint {
    1210            0 :                 let rec = v17::XlEndOfRecovery::decode(&mut buf);
    1211            0 :                 cp.wal_level = rec.wal_level;
    1212            0 :                 self.checkpoint_modified = true;
    1213            0 :             }
    1214           14 :         }
    1215              : 
    1216           15 :         enum_pgversion_dispatch!(&mut self.checkpoint, CheckPoint, cp, {
    1217            0 :             if info == pg_constants::XLOG_NEXTOID {
    1218            0 :                 let next_oid = buf.get_u32_le();
    1219            0 :                 if cp.nextOid != next_oid {
    1220            0 :                     cp.nextOid = next_oid;
    1221            0 :                     self.checkpoint_modified = true;
    1222            0 :                 }
    1223            0 :             } else if info == pg_constants::XLOG_CHECKPOINT_ONLINE
    1224            0 :                 || info == pg_constants::XLOG_CHECKPOINT_SHUTDOWN
    1225              :             {
    1226            0 :                 let mut checkpoint_bytes = [0u8; pgv::xlog_utils::SIZEOF_CHECKPOINT];
    1227            0 :                 buf.copy_to_slice(&mut checkpoint_bytes);
    1228            0 :                 let xlog_checkpoint = pgv::CheckPoint::decode(&checkpoint_bytes)?;
    1229            0 :                 trace!(
    1230            0 :                     "xlog_checkpoint.oldestXid={}, checkpoint.oldestXid={}",
    1231              :                     xlog_checkpoint.oldestXid, cp.oldestXid
    1232              :                 );
    1233            0 :                 if (cp.oldestXid.wrapping_sub(xlog_checkpoint.oldestXid) as i32) < 0 {
    1234            0 :                     cp.oldestXid = xlog_checkpoint.oldestXid;
    1235            0 :                 }
    1236            0 :                 trace!(
    1237            0 :                     "xlog_checkpoint.oldestActiveXid={}, checkpoint.oldestActiveXid={}",
    1238              :                     xlog_checkpoint.oldestActiveXid, cp.oldestActiveXid
    1239              :                 );
    1240              : 
    1241              :                 // A shutdown checkpoint has `oldestActiveXid == InvalidTransactionId`,
    1242              :                 // because at shutdown, all in-progress transactions will implicitly
    1243              :                 // end. Postgres startup code knows that, and allows hot standby to start
    1244              :                 // immediately from a shutdown checkpoint.
    1245              :                 //
    1246              :                 // In Neon, Postgres hot standby startup always behaves as if starting from
    1247              :                 // an online checkpoint. It needs a valid `oldestActiveXid` value, so
    1248              :                 // instead of overwriting self.checkpoint.oldestActiveXid with
    1249              :                 // InvalidTransactionid from the checkpoint WAL record, update it to a
    1250              :                 // InvalidTransactionId from the checkpoint WAL record, update it to a
    1251              :                 // point, except for prepared transactions.
    1252              :                 //
    1253              :                 // See also the neon code changes in the InitWalRecovery() function.
    1254            0 :                 if xlog_checkpoint.oldestActiveXid == pg_constants::INVALID_TRANSACTION_ID
    1255            0 :                     && info == pg_constants::XLOG_CHECKPOINT_SHUTDOWN
    1256              :                 {
    1257            0 :                     let oldest_active_xid = if pg_version >= PgMajorVersion::PG17 {
    1258            0 :                         let mut oldest_active_full_xid = cp.nextXid.value;
    1259            0 :                         for xid in modification.tline.list_twophase_files(lsn, ctx).await? {
    1260            0 :                             if xid < oldest_active_full_xid {
    1261            0 :                                 oldest_active_full_xid = xid;
    1262            0 :                             }
    1263              :                         }
    1264            0 :                         oldest_active_full_xid as u32
    1265              :                     } else {
    1266            0 :                         let mut oldest_active_xid = cp.nextXid.value as u32;
    1267            0 :                         for xid in modification.tline.list_twophase_files(lsn, ctx).await? {
    1268            0 :                             let narrow_xid = xid as u32;
    1269            0 :                             if (narrow_xid.wrapping_sub(oldest_active_xid) as i32) < 0 {
    1270            0 :                                 oldest_active_xid = narrow_xid;
    1271            0 :                             }
    1272              :                         }
    1273            0 :                         oldest_active_xid
    1274              :                     };
    1275            0 :                     cp.oldestActiveXid = oldest_active_xid;
    1276            0 :                 } else {
    1277            0 :                     cp.oldestActiveXid = xlog_checkpoint.oldestActiveXid;
    1278            0 :                 }
    1279              :                 // NB: We abuse the Checkpoint.redo field:
    1280              :                 //
    1281              :                 // - In PostgreSQL, the Checkpoint struct doesn't store the information
    1282              :                 //   of whether this is an online checkpoint or a shutdown checkpoint. It's
    1283              :                 //   stored in the XLOG info field of the WAL record, shutdown checkpoints
    1284              :                 //   use record type XLOG_CHECKPOINT_SHUTDOWN and online checkpoints use
    1285              :                 //   XLOG_CHECKPOINT_ONLINE. We don't store the original WAL record headers
    1286              :                 //   in the pageserver, however.
    1287              :                 //
    1288              :                 // - In PostgreSQL, the Checkpoint.redo field stores the *start* of the
    1289              :                 //   checkpoint record, if it's a shutdown checkpoint. But when we are
    1290              :                 //   starting from a shutdown checkpoint, the basebackup LSN is the *end*
    1291              :                 //   of the shutdown checkpoint WAL record. That makes it difficult to
    1292              :                 //   correctly detect whether we're starting from a shutdown record or
    1293              :                 //   not.
    1294              :                 //
    1295              :                 // To address both of those issues, we store 0 in the redo field if it's
    1296              :                 // an online checkpoint record, and the record's *end* LSN if it's a
    1297              :                 // shutdown checkpoint. We don't need the original redo pointer in neon,
    1298              :                 // because we don't perform WAL replay at startup anyway, so we can get
    1299              :                 // away with abusing the redo field like this.
    1300              :                 //
    1301              :                 // XXX: Ideally, we would persist the extra information in a more
    1302              :                 // explicit format, rather than repurpose the fields of the Postgres
    1303              :                 // struct like this. However, we already have persisted data like this,
    1304              :                 // so we need to maintain backwards compatibility.
    1305              :                 //
    1306              :                 // NB: We didn't originally have this convention, so there are still old
    1307              :                 // persisted records that didn't do this. Before, we didn't update the
    1308              :                 // persisted redo field at all. That means that old records have a bogus
    1309              :                 // redo pointer that points to some old value, from the checkpoint record
    1310              :                 // that was originally imported from the data directory. If it was a
    1311              :                 // project created in Neon, that means it points to the first checkpoint
    1312              :                 // after initdb. That's OK for our purposes: all such old checkpoints are
    1313              :                 // treated as old online checkpoints when the basebackup is created.
    1314            0 :                 cp.redo = if info == pg_constants::XLOG_CHECKPOINT_SHUTDOWN {
    1315              :                     // Store the *end* LSN of the checkpoint record. Or to be precise,
    1316              :                     // the start LSN of the *next* record, i.e. if the record ends
    1317              :                     // exactly at page boundary, the redo LSN points to just after the
    1318              :                     // page header on the next page.
    1319            0 :                     lsn.into()
    1320              :                 } else {
    1321            0 :                     Lsn::INVALID.into()
    1322              :                 };
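                      :                 // For illustration, a consumer of this convention could
                      :                 // distinguish the two cases like this (a sketch; the
                      :                 // `basebackup_lsn` name is hypothetical):
                      :                 //
                      :                 //     let is_online = cp.redo == u64::from(Lsn::INVALID);
                      :                 //     let at_shutdown = cp.redo == u64::from(basebackup_lsn);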
    1323              : 
    1324              :                 // Write a new checkpoint key-value pair on every checkpoint record, even
    1325              :                 // if nothing really changed. Not strictly required, but it seems nice to
    1326              :                 // have some trace of the checkpoint records in the layer files at the same
    1327              :                 // LSNs.
    1328            0 :                 self.checkpoint_modified = true;
    1329            0 :             }
    1330              :         });
    1331              : 
    1332           15 :         if info == pg_constants::XLOG_CHECKPOINT_SHUTDOWN {
    1333            1 :             modification.tline.prepare_basebackup(lsn);
    1334           14 :         }
    1335              : 
    1336           15 :         Ok(())
    1337           15 :     }
    1338              : 
    1339            0 :     async fn ingest_logical_message_put(
    1340            0 :         &mut self,
    1341            0 :         put: PutLogicalMessage,
    1342            0 :         modification: &mut DatadirModification<'_>,
    1343            0 :         ctx: &RequestContext,
    1344            0 :     ) -> Result<(), WalIngestError> {
    1345            0 :         let PutLogicalMessage { path, buf } = put;
    1346            0 :         modification.put_file(path.as_str(), &buf, ctx).await
    1347            0 :     }
    1348              : 
    1349            0 :     fn ingest_standby_record(&mut self, record: StandbyRecord) -> Result<(), WalIngestError> {
    1350            0 :         match record {
    1351            0 :             StandbyRecord::RunningXacts(running_xacts) => {
    1352            0 :                 enum_pgversion_dispatch!(&mut self.checkpoint, CheckPoint, cp, {
    1353            0 :                     cp.oldestActiveXid = running_xacts.oldest_running_xid;
    1354            0 :                 });
    1355              : 
    1356            0 :                 self.checkpoint_modified = true;
    1357              :             }
    1358              :         }
    1359              : 
    1360            0 :         Ok(())
    1361            0 :     }
    1362              : 
    1363            0 :     async fn ingest_replorigin_record(
    1364            0 :         &mut self,
    1365            0 :         record: ReploriginRecord,
    1366            0 :         modification: &mut DatadirModification<'_>,
    1367            0 :     ) -> Result<(), WalIngestError> {
    1368            0 :         match record {
    1369            0 :             ReploriginRecord::Set(set) => {
    1370            0 :                 modification
    1371            0 :                     .set_replorigin(set.node_id, set.remote_lsn)
    1372            0 :                     .await?;
    1373              :             }
    1374            0 :             ReploriginRecord::Drop(drop) => {
    1375            0 :                 modification.drop_replorigin(drop.node_id).await?;
    1376              :             }
    1377              :         }
    1378              : 
    1379            0 :         Ok(())
    1380            0 :     }
    1381              : 
    1382            9 :     async fn put_rel_creation(
    1383            9 :         &mut self,
    1384            9 :         modification: &mut DatadirModification<'_>,
    1385            9 :         rel: RelTag,
    1386            9 :         ctx: &RequestContext,
    1387            9 :     ) -> Result<(), WalIngestError> {
    1388            9 :         modification.put_rel_creation(rel, 0, ctx).await?;
    1389            9 :         Ok(())
    1390            9 :     }
    1391              : 
    1392              :     #[cfg(test)]
    1393       136201 :     async fn put_rel_page_image(
    1394       136201 :         &mut self,
    1395       136201 :         modification: &mut DatadirModification<'_>,
    1396       136201 :         rel: RelTag,
    1397       136201 :         blknum: BlockNumber,
    1398       136201 :         img: Bytes,
    1399       136201 :         ctx: &RequestContext,
    1400       136201 :     ) -> Result<(), WalIngestError> {
    1401       136201 :         self.handle_rel_extend(modification, rel, blknum, ctx)
    1402       136201 :             .await?;
    1403       136201 :         modification.put_rel_page_image(rel, blknum, img)?;
    1404       136201 :         Ok(())
    1405       136201 :     }
    1406              : 
    1407            6 :     async fn put_rel_wal_record(
    1408            6 :         &mut self,
    1409            6 :         modification: &mut DatadirModification<'_>,
    1410            6 :         rel: RelTag,
    1411            6 :         blknum: BlockNumber,
    1412            6 :         rec: NeonWalRecord,
    1413            6 :         ctx: &RequestContext,
    1414            6 :     ) -> Result<(), WalIngestError> {
    1415            6 :         self.handle_rel_extend(modification, rel, blknum, ctx)
    1416            6 :             .await?;
    1417            6 :         modification.put_rel_wal_record(rel, blknum, rec)?;
    1418            6 :         Ok(())
    1419            6 :     }
    1420              : 
    1421         3006 :     async fn put_rel_truncation(
    1422         3006 :         &mut self,
    1423         3006 :         modification: &mut DatadirModification<'_>,
    1424         3006 :         rel: RelTag,
    1425         3006 :         nblocks: BlockNumber,
    1426         3006 :         ctx: &RequestContext,
    1427         3006 :     ) -> Result<(), WalIngestError> {
    1428         3006 :         modification.put_rel_truncation(rel, nblocks, ctx).await?;
    1429         3006 :         Ok(())
    1430         3006 :     }
    1431              : 
    1432       136207 :     async fn handle_rel_extend(
    1433       136207 :         &mut self,
    1434       136207 :         modification: &mut DatadirModification<'_>,
    1435       136207 :         rel: RelTag,
    1436       136207 :         blknum: BlockNumber,
    1437       136207 :         ctx: &RequestContext,
    1438       136207 :     ) -> Result<(), WalIngestError> {
    1439       136207 :         let new_nblocks = blknum + 1;
    1440              :         // Check if the relation exists. We implicitly create relations on the
    1441              :         // first record that touches them.
    1442       136207 :         let old_nblocks = modification.create_relation_if_required(rel, ctx).await?;
    1443              : 
    1444       136207 :         if new_nblocks > old_nblocks {
    1445              :             //info!("extending {} {} to {}", rel, old_nblocks, new_nblocks);
    1446       136199 :             modification.put_rel_extend(rel, new_nblocks, ctx).await?;
    1447              : 
    1448       136199 :             let mut key = rel_block_to_key(rel, blknum);
    1449              : 
    1450              :             // fill the gap with zeros
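                      :             // For illustration: if the relation previously had 3 blocks and
                      :             // this record touches block 7, blocks 3, 4, 5 and 6 are written
                      :             // as zero pages (minus any owned by other shards) and the
                      :             // relation is extended to 8 blocks.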
    1451       136199 :             let mut gap_blocks_filled: u64 = 0;
    1452       136199 :             for gap_blknum in old_nblocks..blknum {
    1453         1499 :                 key.field6 = gap_blknum;
    1454              : 
    1455         1499 :                 if self.shard.get_shard_number(&key) != self.shard.number {
    1456            0 :                     continue;
    1457         1499 :                 }
    1458              : 
    1459         1499 :                 modification.put_rel_page_image_zero(rel, gap_blknum)?;
    1460         1499 :                 gap_blocks_filled += 1;
    1461              :             }
    1462              : 
    1463       136199 :             WAL_INGEST
    1464       136199 :                 .gap_blocks_zeroed_on_rel_extend
    1465       136199 :                 .inc_by(gap_blocks_filled);
    1466              : 
    1467              :             // Log something when relation extends cause us to fill gaps
    1468              :             // with zero pages. Logging is rate limited per pg version so
    1469              :             // that one busy version does not crowd out the others.
    1470       136199 :             if gap_blocks_filled > 0 {
    1471              :                 use std::sync::Mutex;
    1472              : 
    1473              :                 use once_cell::sync::Lazy;
    1474              :                 use utils::rate_limit::RateLimit;
    1475              : 
    1476              :                 struct RateLimitPerPgVersion {
    1477              :                     rate_limiters: [Lazy<Mutex<RateLimit>>; 4],
    1478              :                 }
    1479              : 
    1480              :                 impl RateLimitPerPgVersion {
    1481            0 :                     const fn new() -> Self {
    1482              :                         Self {
    1483              :                             rate_limiters: [const {
    1484            1 :                                 Lazy::new(|| Mutex::new(RateLimit::new(Duration::from_secs(30))))
    1485              :                             }; 4],
    1486              :                         }
    1487            0 :                     }
    1488              : 
    1489            2 :                     const fn rate_limiter(
    1490            2 :                         &self,
    1491            2 :                         pg_version: PgMajorVersion,
    1492            2 :                     ) -> Option<&Lazy<Mutex<RateLimit>>> {
    1493              :                         const MIN_PG_VERSION: u32 = PgMajorVersion::PG14.major_version_num();
    1494              :                         const MAX_PG_VERSION: u32 = PgMajorVersion::PG17.major_version_num();
    1495            2 :                         let pg_version = pg_version.major_version_num();
    1496              : 
    1497            2 :                         if pg_version < MIN_PG_VERSION || pg_version > MAX_PG_VERSION {
    1498            0 :                             return None;
    1499            2 :                         }
    1500              : 
    1501            2 :                         Some(&self.rate_limiters[(pg_version - MIN_PG_VERSION) as usize])
    1502            2 :                     }
    1503              :                 }
    1504              : 
    1505              :                 static LOGGED: RateLimitPerPgVersion = RateLimitPerPgVersion::new();
    1506            2 :                 if let Some(rate_limiter) = LOGGED.rate_limiter(modification.tline.pg_version) {
    1507            2 :                     if let Ok(mut locked) = rate_limiter.try_lock() {
    1508            2 :                         locked.call(|| {
    1509            1 :                             info!(
    1510            0 :                                 lsn=%modification.get_lsn(),
    1511              :                                 pg_version=%modification.tline.pg_version,
    1512              :                                 rel=%rel,
    1513            0 :                                 "Filled {} gap blocks on rel extend to {} from {}",
    1514              :                                 gap_blocks_filled,
    1515              :                                 new_nblocks,
    1516              :                                 old_nblocks);
    1517            1 :                         });
    1518            0 :                     }
    1519            0 :                 }
    1520       136197 :             }
    1521            8 :         }
    1522       136207 :         Ok(())
    1523       136207 :     }
    1524              : 
    1525            0 :     async fn put_slru_page_image(
    1526            0 :         &mut self,
    1527            0 :         modification: &mut DatadirModification<'_>,
    1528            0 :         kind: SlruKind,
    1529            0 :         segno: u32,
    1530            0 :         blknum: BlockNumber,
    1531            0 :         img: Bytes,
    1532            0 :         ctx: &RequestContext,
    1533            0 :     ) -> Result<(), WalIngestError> {
    1534            0 :         if !self.shard.is_shard_zero() {
    1535            0 :             return Ok(());
    1536            0 :         }
    1537              : 
    1538            0 :         self.handle_slru_extend(modification, kind, segno, blknum, ctx)
    1539            0 :             .await?;
    1540            0 :         modification.put_slru_page_image(kind, segno, blknum, img)?;
    1541            0 :         Ok(())
    1542            0 :     }
    1543              : 
    1544            0 :     async fn handle_slru_extend(
    1545            0 :         &mut self,
    1546            0 :         modification: &mut DatadirModification<'_>,
    1547            0 :         kind: SlruKind,
    1548            0 :         segno: u32,
    1549            0 :         blknum: BlockNumber,
    1550            0 :         ctx: &RequestContext,
    1551            0 :     ) -> Result<(), WalIngestError> {
    1552              :         // we don't use a cache for this like we do for relations. SLRUs are explicitly
    1553              :         // extended with ZEROPAGE records, not with commit records, so it happens
    1554              :         // a lot less frequently.
    1555              : 
    1556            0 :         let new_nblocks = blknum + 1;
    1557              :         // Check if the segment exists. We implicitly create SLRU segments on the
    1558              :         // first record that touches them.
    1559              :         // TODO: it would be nice to be more explicit about this
    1560            0 :         let old_nblocks = if !modification
    1561            0 :             .tline
    1562            0 :             .get_slru_segment_exists(kind, segno, Version::Modified(modification), ctx)
    1563            0 :             .await?
    1564              :         {
    1565              :             // create it with 0 size initially, the logic below will extend it
    1566            0 :             modification
    1567            0 :                 .put_slru_segment_creation(kind, segno, 0, ctx)
    1568            0 :                 .await?;
    1569            0 :             0
    1570              :         } else {
    1571            0 :             modification
    1572            0 :                 .tline
    1573            0 :                 .get_slru_segment_size(kind, segno, Version::Modified(modification), ctx)
    1574            0 :                 .await?
    1575              :         };
    1576              : 
    1577            0 :         if new_nblocks > old_nblocks {
    1578            0 :             trace!(
    1579            0 :                 "extending SLRU {:?} seg {} from {} to {} blocks",
    1580              :                 kind, segno, old_nblocks, new_nblocks
    1581              :             );
    1582            0 :             modification.put_slru_extend(kind, segno, new_nblocks)?;
    1583              : 
    1584              :             // fill the gap with zeros
    1585            0 :             for gap_blknum in old_nblocks..blknum {
    1586            0 :                 modification.put_slru_page_image_zero(kind, segno, gap_blknum)?;
    1587              :             }
    1588            0 :         }
    1589            0 :         Ok(())
    1590            0 :     }
    1591              : }
    1592              : 
    1593              : /// Returns the size of the relation as of this modification, or None if the relation doesn't exist.
    1594              : ///
    1595              : /// This is only accurate on shard 0. On other shards, it will return the size up to the highest
    1596              : /// page number stored in the shard, or None if the shard does not have any pages for it.
    1597            6 : async fn get_relsize(
    1598            6 :     modification: &DatadirModification<'_>,
    1599            6 :     rel: RelTag,
    1600            6 :     ctx: &RequestContext,
    1601            6 : ) -> Result<Option<BlockNumber>, PageReconstructError> {
    1602            6 :     if !modification
    1603            6 :         .tline
    1604            6 :         .get_rel_exists(rel, Version::Modified(modification), ctx)
    1605            6 :         .await?
    1606              :     {
    1607            0 :         return Ok(None);
    1608            6 :     }
    1609            6 :     modification
    1610            6 :         .tline
    1611            6 :         .get_rel_size(rel, Version::Modified(modification), ctx)
    1612            6 :         .await
    1613            6 :         .map(Some)
    1614            6 : }
    1615              : 
    1616              : #[allow(clippy::bool_assert_comparison)]
    1617              : #[cfg(test)]
    1618              : mod tests {
    1619              :     use anyhow::Result;
    1620              :     use postgres_ffi::PgMajorVersion;
    1621              :     use postgres_ffi::RELSEG_SIZE;
    1622              : 
    1623              :     use super::*;
    1624              :     use crate::DEFAULT_PG_VERSION;
    1625              :     use crate::tenant::harness::*;
    1626              :     use crate::tenant::remote_timeline_client::{INITDB_PATH, remote_initdb_archive_path};
    1627              :     use crate::tenant::storage_layer::IoConcurrency;
    1628              : 
    1629              :     /// Arbitrary relation tag, for testing.
    1630              :     const TESTREL_A: RelTag = RelTag {
    1631              :         spcnode: 0,
    1632              :         dbnode: 111,
    1633              :         relnode: 1000,
    1634              :         forknum: 0,
    1635              :     };
    1636              : 
    1637            6 :     fn assert_current_logical_size(_timeline: &Timeline, _lsn: Lsn) {
    1638              :         // TODO
    1639            6 :     }
    1640              : 
    1641              :     #[tokio::test]
    1642            1 :     async fn test_zeroed_checkpoint_decodes_correctly() -> Result<(), anyhow::Error> {
    1643            5 :         for i in PgMajorVersion::ALL {
    1644            4 :             dispatch_pgversion!(i, {
    1645            1 :                 pgv::CheckPoint::decode(&pgv::ZERO_CHECKPOINT)?;
    1646            1 :             });
    1647            1 :         }
    1648            1 : 
    1649            1 :         Ok(())
    1650            1 :     }
    1651              : 
    1652            4 :     async fn init_walingest_test(tline: &Timeline, ctx: &RequestContext) -> Result<WalIngest> {
    1653            4 :         let mut m = tline.begin_modification(Lsn(0x10));
    1654            4 :         m.put_checkpoint(dispatch_pgversion!(
    1655            4 :             tline.pg_version,
    1656            0 :             pgv::ZERO_CHECKPOINT.clone()
    1657            0 :         ))?;
    1658            4 :         m.put_relmap_file(0, 111, Bytes::from(""), ctx).await?; // dummy relmapper file
    1659            4 :         m.commit(ctx).await?;
    1660            4 :         let walingest = WalIngest::new(tline, Lsn(0x10), ctx).await?;
    1661              : 
    1662            4 :         Ok(walingest)
    1663            4 :     }
    1664              : 
    1665              :     #[tokio::test]
    1666            1 :     async fn test_relsize() -> Result<()> {
    1667            1 :         let (tenant, ctx) = TenantHarness::create("test_relsize").await?.load().await;
    1668            1 :         let io_concurrency = IoConcurrency::spawn_for_test();
    1669            1 :         let tline = tenant
    1670            1 :             .create_test_timeline(TIMELINE_ID, Lsn(8), DEFAULT_PG_VERSION, &ctx)
    1671            1 :             .await?;
    1672            1 :         let mut walingest = init_walingest_test(&tline, &ctx).await?;
    1673              : 
    1674            1 :         let mut m = tline.begin_modification(Lsn(0x20));
    1675            1 :         walingest.put_rel_creation(&mut m, TESTREL_A, &ctx).await?;
    1676            1 :         walingest
    1677            1 :             .put_rel_page_image(&mut m, TESTREL_A, 0, test_img("foo blk 0 at 2"), &ctx)
    1678            1 :             .await?;
    1679            1 :         m.commit(&ctx).await?;
    1680            1 :         let mut m = tline.begin_modification(Lsn(0x30));
    1681            1 :         walingest
    1682            1 :             .put_rel_page_image(&mut m, TESTREL_A, 0, test_img("foo blk 0 at 3"), &ctx)
    1683            1 :             .await?;
    1684            1 :         m.commit(&ctx).await?;
    1685            1 :         let mut m = tline.begin_modification(Lsn(0x40));
    1686            1 :         walingest
    1687            1 :             .put_rel_page_image(&mut m, TESTREL_A, 1, test_img("foo blk 1 at 4"), &ctx)
    1688            1 :             .await?;
    1689            1 :         m.commit(&ctx).await?;
    1690            1 :         let mut m = tline.begin_modification(Lsn(0x50));
    1691            1 :         walingest
    1692            1 :             .put_rel_page_image(&mut m, TESTREL_A, 2, test_img("foo blk 2 at 5"), &ctx)
    1693            1 :             .await?;
    1694            1 :         m.commit(&ctx).await?;
    1695              : 
    1696            1 :         assert_current_logical_size(&tline, Lsn(0x50));
    1697              : 
    1698            1 :         let test_span = tracing::info_span!(parent: None, "test",
    1699            0 :                                             tenant_id=%tline.tenant_shard_id.tenant_id,
    1700            0 :                                             shard_id=%tline.tenant_shard_id.shard_slug(),
    1701            0 :                                             timeline_id=%tline.timeline_id);
    1702              : 
    1703              :         // The relation was created at LSN 0x20, and is not visible at LSN 0x10 yet.
    1704            1 :         assert_eq!(
    1705            1 :             tline
    1706            1 :                 .get_rel_exists(TESTREL_A, Version::at(Lsn(0x10)), &ctx)
    1707            1 :                 .await?,
    1708              :             false
    1709              :         );
    1710            1 :         assert!(
    1711            1 :             tline
    1712            1 :                 .get_rel_size(TESTREL_A, Version::at(Lsn(0x10)), &ctx)
    1713            1 :                 .await
    1714            1 :                 .is_err()
    1715              :         );
    1716            1 :         assert_eq!(
    1717            1 :             tline
    1718            1 :                 .get_rel_exists(TESTREL_A, Version::at(Lsn(0x20)), &ctx)
    1719            1 :                 .await?,
    1720              :             true
    1721              :         );
    1722            1 :         assert_eq!(
    1723            1 :             tline
    1724            1 :                 .get_rel_size(TESTREL_A, Version::at(Lsn(0x20)), &ctx)
    1725            1 :                 .await?,
    1726              :             1
    1727              :         );
    1728            1 :         assert_eq!(
    1729            1 :             tline
    1730            1 :                 .get_rel_size(TESTREL_A, Version::at(Lsn(0x50)), &ctx)
    1731            1 :                 .await?,
    1732              :             3
    1733              :         );
    1734              : 
    1735              :         // Check page contents at each LSN
    1736            1 :         assert_eq!(
    1737            1 :             tline
    1738            1 :                 .get_rel_page_at_lsn(
    1739            1 :                     TESTREL_A,
    1740            1 :                     0,
    1741            1 :                     Version::at(Lsn(0x20)),
    1742            1 :                     &ctx,
    1743            1 :                     io_concurrency.clone()
    1744            1 :                 )
    1745            1 :                 .instrument(test_span.clone())
    1746            1 :                 .await?,
    1747            1 :             test_img("foo blk 0 at 2")
    1748              :         );
    1749              : 
    1750            1 :         assert_eq!(
    1751            1 :             tline
    1752            1 :                 .get_rel_page_at_lsn(
    1753            1 :                     TESTREL_A,
    1754            1 :                     0,
    1755            1 :                     Version::at(Lsn(0x30)),
    1756            1 :                     &ctx,
    1757            1 :                     io_concurrency.clone()
    1758            1 :                 )
    1759            1 :                 .instrument(test_span.clone())
    1760            1 :                 .await?,
    1761            1 :             test_img("foo blk 0 at 3")
    1762              :         );
    1763              : 
    1764            1 :         assert_eq!(
    1765            1 :             tline
    1766            1 :                 .get_rel_page_at_lsn(
    1767            1 :                     TESTREL_A,
    1768            1 :                     0,
    1769            1 :                     Version::at(Lsn(0x40)),
    1770            1 :                     &ctx,
    1771            1 :                     io_concurrency.clone()
    1772            1 :                 )
    1773            1 :                 .instrument(test_span.clone())
    1774            1 :                 .await?,
    1775            1 :             test_img("foo blk 0 at 3")
    1776              :         );
    1777            1 :         assert_eq!(
    1778            1 :             tline
    1779            1 :                 .get_rel_page_at_lsn(
    1780            1 :                     TESTREL_A,
    1781            1 :                     1,
    1782            1 :                     Version::at(Lsn(0x40)),
    1783            1 :                     &ctx,
    1784            1 :                     io_concurrency.clone()
    1785            1 :                 )
    1786            1 :                 .instrument(test_span.clone())
    1787            1 :                 .await?,
    1788            1 :             test_img("foo blk 1 at 4")
    1789              :         );
    1790              : 
    1791            1 :         assert_eq!(
    1792            1 :             tline
    1793            1 :                 .get_rel_page_at_lsn(
    1794            1 :                     TESTREL_A,
    1795            1 :                     0,
    1796            1 :                     Version::at(Lsn(0x50)),
    1797            1 :                     &ctx,
    1798            1 :                     io_concurrency.clone()
    1799            1 :                 )
    1800            1 :                 .instrument(test_span.clone())
    1801            1 :                 .await?,
    1802            1 :             test_img("foo blk 0 at 3")
    1803              :         );
    1804            1 :         assert_eq!(
    1805            1 :             tline
    1806            1 :                 .get_rel_page_at_lsn(
    1807            1 :                     TESTREL_A,
    1808            1 :                     1,
    1809            1 :                     Version::at(Lsn(0x50)),
    1810            1 :                     &ctx,
    1811            1 :                     io_concurrency.clone()
    1812            1 :                 )
    1813            1 :                 .instrument(test_span.clone())
    1814            1 :                 .await?,
    1815            1 :             test_img("foo blk 1 at 4")
    1816              :         );
    1817            1 :         assert_eq!(
    1818            1 :             tline
    1819            1 :                 .get_rel_page_at_lsn(
    1820            1 :                     TESTREL_A,
    1821            1 :                     2,
    1822            1 :                     Version::at(Lsn(0x50)),
    1823            1 :                     &ctx,
    1824            1 :                     io_concurrency.clone()
    1825            1 :                 )
    1826            1 :                 .instrument(test_span.clone())
    1827            1 :                 .await?,
    1828            1 :             test_img("foo blk 2 at 5")
    1829              :         );
    1830              : 
    1831              :         // Truncate last block
    1832            1 :         let mut m = tline.begin_modification(Lsn(0x60));
    1833            1 :         walingest
    1834            1 :             .put_rel_truncation(&mut m, TESTREL_A, 2, &ctx)
    1835            1 :             .await?;
    1836            1 :         m.commit(&ctx).await?;
    1837            1 :         assert_current_logical_size(&tline, Lsn(0x60));
    1838              : 
    1839              :         // Check reported size and contents after truncation
    1840            1 :         assert_eq!(
    1841            1 :             tline
    1842            1 :                 .get_rel_size(TESTREL_A, Version::at(Lsn(0x60)), &ctx)
    1843            1 :                 .await?,
    1844              :             2
    1845              :         );
    1846            1 :         assert_eq!(
    1847            1 :             tline
    1848            1 :                 .get_rel_page_at_lsn(
    1849            1 :                     TESTREL_A,
    1850            1 :                     0,
    1851            1 :                     Version::at(Lsn(0x60)),
    1852            1 :                     &ctx,
    1853            1 :                     io_concurrency.clone()
    1854            1 :                 )
    1855            1 :                 .instrument(test_span.clone())
    1856            1 :                 .await?,
    1857            1 :             test_img("foo blk 0 at 3")
    1858              :         );
    1859            1 :         assert_eq!(
    1860            1 :             tline
    1861            1 :                 .get_rel_page_at_lsn(
    1862            1 :                     TESTREL_A,
    1863            1 :                     1,
    1864            1 :                     Version::at(Lsn(0x60)),
    1865            1 :                     &ctx,
    1866            1 :                     io_concurrency.clone()
    1867            1 :                 )
    1868            1 :                 .instrument(test_span.clone())
    1869            1 :                 .await?,
    1870            1 :             test_img("foo blk 1 at 4")
    1871              :         );
    1872              : 
    1873              :         // Should still see the truncated block at the older LSN.
    1874            1 :         assert_eq!(
    1875            1 :             tline
    1876            1 :                 .get_rel_size(TESTREL_A, Version::at(Lsn(0x50)), &ctx)
    1877            1 :                 .await?,
    1878              :             3
    1879              :         );
    1880            1 :         assert_eq!(
    1881            1 :             tline
    1882            1 :                 .get_rel_page_at_lsn(
    1883            1 :                     TESTREL_A,
    1884            1 :                     2,
    1885            1 :                     Version::at(Lsn(0x50)),
    1886            1 :                     &ctx,
    1887            1 :                     io_concurrency.clone()
    1888            1 :                 )
    1889            1 :                 .instrument(test_span.clone())
    1890            1 :                 .await?,
    1891            1 :             test_img("foo blk 2 at 5")
    1892              :         );
    1893              : 
    1894              :         // Truncate to zero length
    1895            1 :         let mut m = tline.begin_modification(Lsn(0x68));
    1896            1 :         walingest
    1897            1 :             .put_rel_truncation(&mut m, TESTREL_A, 0, &ctx)
    1898            1 :             .await?;
    1899            1 :         m.commit(&ctx).await?;
    1900            1 :         assert_eq!(
    1901            1 :             tline
    1902            1 :                 .get_rel_size(TESTREL_A, Version::at(Lsn(0x68)), &ctx)
    1903            1 :                 .await?,
    1904              :             0
    1905              :         );
    1906              : 
    1907              :         // Extend from 0 to 2 blocks, leaving a gap
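                      :         // Writing only block 1 into the now-empty relation implicitly
                      :         // extends it to two blocks; block 0 is never written, so it is
                      :         // expected to read back as an all-zeros page (ZERO_PAGE below).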
    1908            1 :         let mut m = tline.begin_modification(Lsn(0x70));
    1909            1 :         walingest
    1910            1 :             .put_rel_page_image(&mut m, TESTREL_A, 1, test_img("foo blk 1"), &ctx)
    1911            1 :             .await?;
    1912            1 :         m.commit(&ctx).await?;
    1913            1 :         assert_eq!(
    1914            1 :             tline
    1915            1 :                 .get_rel_size(TESTREL_A, Version::at(Lsn(0x70)), &ctx)
    1916            1 :                 .await?,
    1917              :             2
    1918              :         );
    1919            1 :         assert_eq!(
    1920            1 :             tline
    1921            1 :                 .get_rel_page_at_lsn(
    1922            1 :                     TESTREL_A,
    1923            1 :                     0,
    1924            1 :                     Version::at(Lsn(0x70)),
    1925            1 :                     &ctx,
    1926            1 :                     io_concurrency.clone()
    1927            1 :                 )
    1928            1 :                 .instrument(test_span.clone())
    1929            1 :                 .await?,
    1930            1 :             ZERO_PAGE
    1931              :         );
    1932            1 :         assert_eq!(
    1933            1 :             tline
    1934            1 :                 .get_rel_page_at_lsn(
    1935            1 :                     TESTREL_A,
    1936            1 :                     1,
    1937            1 :                     Version::at(Lsn(0x70)),
    1938            1 :                     &ctx,
    1939            1 :                     io_concurrency.clone()
    1940            1 :                 )
    1941            1 :                 .instrument(test_span.clone())
    1942            1 :                 .await?,
    1943            1 :             test_img("foo blk 1")
    1944              :         );
    1945              : 
    1946              :         // Extend a lot more, leaving a big gap that spans across segments
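                      :         // Writing block 1500 extends the relation to 1501 blocks in one
                      :         // step; the untouched blocks 2..1500 must all read back as zero
                      :         // pages, which the loop below verifies.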
    1947            1 :         let mut m = tline.begin_modification(Lsn(0x80));
    1948            1 :         walingest
    1949            1 :             .put_rel_page_image(&mut m, TESTREL_A, 1500, test_img("foo blk 1500"), &ctx)
    1950            1 :             .await?;
    1951            1 :         m.commit(&ctx).await?;
    1952            1 :         assert_eq!(
    1953            1 :             tline
    1954            1 :                 .get_rel_size(TESTREL_A, Version::at(Lsn(0x80)), &ctx)
    1955            1 :                 .await?,
    1956              :             1501
    1957              :         );
    1958         1499 :         for blk in 2..1500 {
    1959         1498 :             assert_eq!(
    1960         1498 :                 tline
    1961         1498 :                     .get_rel_page_at_lsn(
    1962         1498 :                         TESTREL_A,
    1963         1498 :                         blk,
    1964         1498 :                         Version::at(Lsn(0x80)),
    1965         1498 :                         &ctx,
    1966         1498 :                         io_concurrency.clone()
    1967         1498 :                     )
    1968         1498 :                     .instrument(test_span.clone())
    1969         1498 :                     .await?,
    1970         1498 :                 ZERO_PAGE
    1971              :             );
    1972              :         }
    1973            1 :         assert_eq!(
    1974            1 :             tline
    1975            1 :                 .get_rel_page_at_lsn(
    1976            1 :                     TESTREL_A,
    1977            1 :                     1500,
    1978            1 :                     Version::at(Lsn(0x80)),
    1979            1 :                     &ctx,
    1980            1 :                     io_concurrency.clone()
    1981            1 :                 )
    1982            1 :                 .instrument(test_span.clone())
    1983            1 :                 .await?,
    1984            1 :             test_img("foo blk 1500")
    1985              :         );
    1986              : 
    1987            2 :         Ok(())
    1988            1 :     }
    1989              : 
    1990              :     // Test what happens if we drop a relation
    1991              :     // and then create it again within the same layer.
    1992              :     #[tokio::test]
    1993            1 :     async fn test_drop_extend() -> Result<()> {
    1994            1 :         let (tenant, ctx) = TenantHarness::create("test_drop_extend")
    1995            1 :             .await?
    1996            1 :             .load()
    1997            1 :             .await;
    1998            1 :         let tline = tenant
    1999            1 :             .create_test_timeline(TIMELINE_ID, Lsn(8), DEFAULT_PG_VERSION, &ctx)
    2000            1 :             .await?;
    2001            1 :         let mut walingest = init_walingest_test(&tline, &ctx).await?;
    2002              : 
    2003            1 :         let mut m = tline.begin_modification(Lsn(0x20));
    2004            1 :         walingest
    2005            1 :             .put_rel_page_image(&mut m, TESTREL_A, 0, test_img("foo blk 0 at 2"), &ctx)
    2006            1 :             .await?;
    2007            1 :         m.commit(&ctx).await?;
    2008              : 
    2009              :         // Check that rel exists and size is correct
    2010            1 :         assert_eq!(
    2011            1 :             tline
    2012            1 :                 .get_rel_exists(TESTREL_A, Version::at(Lsn(0x20)), &ctx)
    2013            1 :                 .await?,
    2014              :             true
    2015              :         );
    2016            1 :         assert_eq!(
    2017            1 :             tline
    2018            1 :                 .get_rel_size(TESTREL_A, Version::at(Lsn(0x20)), &ctx)
    2019            1 :                 .await?,
    2020              :             1
    2021              :         );
    2022              : 
    2023              :         // Drop rel
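                      :         // The drop map is keyed by (spcnode, dbnode), grouping the
                      :         // dropped relations per tablespace/database pair.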
    2024            1 :         let mut m = tline.begin_modification(Lsn(0x30));
    2025            1 :         let mut rel_drops = HashMap::new();
    2026            1 :         rel_drops.insert((TESTREL_A.spcnode, TESTREL_A.dbnode), vec![TESTREL_A]);
    2027            1 :         m.put_rel_drops(rel_drops, &ctx).await?;
    2028            1 :         m.commit(&ctx).await?;
    2029              : 
    2030              :         // Check that rel is not visible anymore
    2031            1 :         assert_eq!(
    2032            1 :             tline
    2033            1 :                 .get_rel_exists(TESTREL_A, Version::at(Lsn(0x30)), &ctx)
    2034            1 :                 .await?,
    2035              :             false
    2036              :         );
    2037              : 
    2038              :         // FIXME: should fail
    2039              :         //assert!(tline.get_rel_size(TESTREL_A, Lsn(0x30), false)?.is_none());
    2040              : 
    2041              :         // Re-create it
    2042            1 :         let mut m = tline.begin_modification(Lsn(0x40));
    2043            1 :         walingest
    2044            1 :             .put_rel_page_image(&mut m, TESTREL_A, 0, test_img("foo blk 0 at 4"), &ctx)
    2045            1 :             .await?;
    2046            1 :         m.commit(&ctx).await?;
    2047              : 
    2048              :         // Check that rel exists and size is correct
    2049            1 :         assert_eq!(
    2050            1 :             tline
    2051            1 :                 .get_rel_exists(TESTREL_A, Version::at(Lsn(0x40)), &ctx)
    2052            1 :                 .await?,
    2053              :             true
    2054              :         );
    2055            1 :         assert_eq!(
    2056            1 :             tline
    2057            1 :                 .get_rel_size(TESTREL_A, Version::at(Lsn(0x40)), &ctx)
    2058            1 :                 .await?,
    2059              :             1
    2060              :         );
    2061              : 
    2062            2 :         Ok(())
    2063            1 :     }
    2064              : 
    2065              :     // Test what happens if we truncate a relation
    2066              :     // so that one of its segments is dropped
    2067              :     // and then extend it again within the same layer.
    2068              :     #[tokio::test]
    2069            1 :     async fn test_truncate_extend() -> Result<()> {
    2070            1 :         let (tenant, ctx) = TenantHarness::create("test_truncate_extend")
    2071            1 :             .await?
    2072            1 :             .load()
    2073            1 :             .await;
    2074            1 :         let io_concurrency = IoConcurrency::spawn_for_test();
    2075            1 :         let tline = tenant
    2076            1 :             .create_test_timeline(TIMELINE_ID, Lsn(8), DEFAULT_PG_VERSION, &ctx)
    2077            1 :             .await?;
    2078            1 :         let mut walingest = init_walingest_test(&tline, &ctx).await?;
    2079              : 
    2080              :         // Create a 20 MB relation (the size is arbitrary)
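                      :         // 20 MB of 8 KiB pages: 20 * 1024 * 1024 / 8192 = 2560 blocks.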
    2081            1 :         let relsize = 20 * 1024 * 1024 / 8192;
    2082            1 :         let mut m = tline.begin_modification(Lsn(0x20));
    2083         2560 :         for blkno in 0..relsize {
    2084         2560 :             let data = format!("foo blk {} at {}", blkno, Lsn(0x20));
    2085         2560 :             walingest
    2086         2560 :                 .put_rel_page_image(&mut m, TESTREL_A, blkno, test_img(&data), &ctx)
    2087         2560 :                 .await?;
    2088              :         }
    2089            1 :         m.commit(&ctx).await?;
    2090              : 
    2091            1 :         let test_span = tracing::info_span!(parent: None, "test",
    2092            0 :                                             tenant_id=%tline.tenant_shard_id.tenant_id,
    2093            0 :                                             shard_id=%tline.tenant_shard_id.shard_slug(),
    2094            0 :                                             timeline_id=%tline.timeline_id);
    2095              : 
    2096              :         // The relation was created at LSN 0x20, so it is not visible at LSN 0x10 yet.
    2097            1 :         assert_eq!(
    2098            1 :             tline
    2099            1 :                 .get_rel_exists(TESTREL_A, Version::at(Lsn(0x10)), &ctx)
    2100            1 :                 .await?,
    2101              :             false
    2102              :         );
    2103            1 :         assert!(
    2104            1 :             tline
    2105            1 :                 .get_rel_size(TESTREL_A, Version::at(Lsn(0x10)), &ctx)
    2106            1 :                 .await
    2107            1 :                 .is_err()
    2108              :         );
    2109              : 
    2110            1 :         assert_eq!(
    2111            1 :             tline
    2112            1 :                 .get_rel_exists(TESTREL_A, Version::at(Lsn(0x20)), &ctx)
    2113            1 :                 .await?,
    2114              :             true
    2115              :         );
    2116            1 :         assert_eq!(
    2117            1 :             tline
    2118            1 :                 .get_rel_size(TESTREL_A, Version::at(Lsn(0x20)), &ctx)
    2119            1 :                 .await?,
    2120              :             relsize
    2121              :         );
    2122              : 
    2123              :         // Check relation content
    2124         2560 :         for blkno in 0..relsize {
    2125         2560 :             let lsn = Lsn(0x20);
    2126         2560 :             let data = format!("foo blk {blkno} at {lsn}");
    2127         2560 :             assert_eq!(
    2128         2560 :                 tline
    2129         2560 :                     .get_rel_page_at_lsn(
    2130         2560 :                         TESTREL_A,
    2131         2560 :                         blkno,
    2132         2560 :                         Version::at(lsn),
    2133         2560 :                         &ctx,
    2134         2560 :                         io_concurrency.clone()
    2135         2560 :                     )
    2136         2560 :                     .instrument(test_span.clone())
    2137         2560 :                     .await?,
    2138         2560 :                 test_img(&data)
    2139              :             );
    2140              :         }
    2141              : 
    2142              :         // Truncate the relation so that its second segment is dropped,
    2143              :         // leaving only one page.
    2144            1 :         let mut m = tline.begin_modification(Lsn(0x60));
    2145            1 :         walingest
    2146            1 :             .put_rel_truncation(&mut m, TESTREL_A, 1, &ctx)
    2147            1 :             .await?;
    2148            1 :         m.commit(&ctx).await?;
    2149              : 
    2150              :         // Check reported size and contents after truncation
    2151            1 :         assert_eq!(
    2152            1 :             tline
    2153            1 :                 .get_rel_size(TESTREL_A, Version::at(Lsn(0x60)), &ctx)
    2154            1 :                 .await?,
    2155              :             1
    2156              :         );
    2157              : 
    2158            2 :         for blkno in 0..1 {
    2159            1 :             let lsn = Lsn(0x20);
    2160            1 :             let data = format!("foo blk {blkno} at {lsn}");
    2161            1 :             assert_eq!(
    2162            1 :                 tline
    2163            1 :                     .get_rel_page_at_lsn(
    2164            1 :                         TESTREL_A,
    2165            1 :                         blkno,
    2166            1 :                         Version::at(Lsn(0x60)),
    2167            1 :                         &ctx,
    2168            1 :                         io_concurrency.clone()
    2169            1 :                     )
    2170            1 :                     .instrument(test_span.clone())
    2171            1 :                     .await?,
    2172            1 :                 test_img(&data)
    2173              :             );
    2174              :         }
    2175              : 
    2176              :         // Should still see all blocks at the older LSN.
    2177            1 :         assert_eq!(
    2178            1 :             tline
    2179            1 :                 .get_rel_size(TESTREL_A, Version::at(Lsn(0x50)), &ctx)
    2180            1 :                 .await?,
    2181              :             relsize
    2182              :         );
    2183         2560 :         for blkno in 0..relsize {
    2184         2560 :             let lsn = Lsn(0x20);
    2185         2560 :             let data = format!("foo blk {blkno} at {lsn}");
    2186         2560 :             assert_eq!(
    2187         2560 :                 tline
    2188         2560 :                     .get_rel_page_at_lsn(
    2189         2560 :                         TESTREL_A,
    2190         2560 :                         blkno,
    2191         2560 :                         Version::at(Lsn(0x50)),
    2192         2560 :                         &ctx,
    2193         2560 :                         io_concurrency.clone()
    2194         2560 :                     )
    2195         2560 :                     .instrument(test_span.clone())
    2196         2560 :                     .await?,
    2197         2560 :                 test_img(&data)
    2198              :             );
    2199              :         }
    2200              : 
    2201              :         // Extend the relation again.
    2202              :         // Add enough blocks to re-create the second segment.
    2203            1 :         let lsn = Lsn(0x80);
    2204            1 :         let mut m = tline.begin_modification(lsn);
    2205         2560 :         for blkno in 0..relsize {
    2206         2560 :             let data = format!("foo blk {blkno} at {lsn}");
    2207         2560 :             walingest
    2208         2560 :                 .put_rel_page_image(&mut m, TESTREL_A, blkno, test_img(&data), &ctx)
    2209         2560 :                 .await?;
    2210              :         }
    2211            1 :         m.commit(&ctx).await?;
    2212              : 
    2213            1 :         assert_eq!(
    2214            1 :             tline
    2215            1 :                 .get_rel_exists(TESTREL_A, Version::at(Lsn(0x80)), &ctx)
    2216            1 :                 .await?,
    2217              :             true
    2218              :         );
    2219            1 :         assert_eq!(
    2220            1 :             tline
    2221            1 :                 .get_rel_size(TESTREL_A, Version::at(Lsn(0x80)), &ctx)
    2222            1 :                 .await?,
    2223              :             relsize
    2224              :         );
    2225              :         // Check relation content
    2226         2560 :         for blkno in 0..relsize {
    2227         2560 :             let lsn = Lsn(0x80);
    2228         2560 :             let data = format!("foo blk {blkno} at {lsn}");
    2229         2560 :             assert_eq!(
    2230         2560 :                 tline
    2231         2560 :                     .get_rel_page_at_lsn(
    2232         2560 :                         TESTREL_A,
    2233         2560 :                         blkno,
    2234         2560 :                         Version::at(Lsn(0x80)),
    2235         2560 :                         &ctx,
    2236         2560 :                         io_concurrency.clone()
    2237         2560 :                     )
    2238         2560 :                     .instrument(test_span.clone())
    2239         2560 :                     .await?,
    2240         2560 :                 test_img(&data)
    2241            1 :             );
    2242            1 :         }
    2243            1 : 
    2244            1 :         Ok(())
    2245            1 :     }
    2246              : 
    2247              :     /// Test get_rel_size() and truncation with a file larger than 1 GB, so that it's
    2248              :     /// split into multiple 1 GB segments in Postgres.
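                      :     /// With 8 KiB pages, one 1 GB segment holds RELSEG_SIZE = 131072
                      :     /// blocks, so the RELSEG_SIZE + 1 blocks written below spill into a
                      :     /// second segment.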
    2249              :     #[tokio::test]
    2250            1 :     async fn test_large_rel() -> Result<()> {
    2251            1 :         let (tenant, ctx) = TenantHarness::create("test_large_rel").await?.load().await;
    2252            1 :         let tline = tenant
    2253            1 :             .create_test_timeline(TIMELINE_ID, Lsn(8), DEFAULT_PG_VERSION, &ctx)
    2254            1 :             .await?;
    2255            1 :         let mut walingest = init_walingest_test(&tline, &ctx).await?;
    2256              : 
    2257            1 :         let mut lsn = 0x10;
    2258       131073 :         for blknum in 0..RELSEG_SIZE + 1 {
    2259       131073 :             lsn += 0x10;
    2260       131073 :             let mut m = tline.begin_modification(Lsn(lsn));
    2261       131073 :             let img = test_img(&format!("foo blk {} at {}", blknum, Lsn(lsn)));
    2262       131073 :             walingest
    2263       131073 :                 .put_rel_page_image(&mut m, TESTREL_A, blknum as BlockNumber, img, &ctx)
    2264       131073 :                 .await?;
    2265       131073 :             m.commit(&ctx).await?;
    2266              :         }
    2267              : 
    2268            1 :         assert_current_logical_size(&tline, Lsn(lsn));
    2269              : 
    2270            1 :         assert_eq!(
    2271            1 :             tline
    2272            1 :                 .get_rel_size(TESTREL_A, Version::at(Lsn(lsn)), &ctx)
    2273            1 :                 .await?,
    2274            1 :             RELSEG_SIZE + 1
    2275              :         );
    2276              : 
    2277              :         // Truncate one block
    2278            1 :         lsn += 0x10;
    2279            1 :         let mut m = tline.begin_modification(Lsn(lsn));
    2280            1 :         walingest
    2281            1 :             .put_rel_truncation(&mut m, TESTREL_A, RELSEG_SIZE, &ctx)
    2282            1 :             .await?;
    2283            1 :         m.commit(&ctx).await?;
    2284            1 :         assert_eq!(
    2285            1 :             tline
    2286            1 :                 .get_rel_size(TESTREL_A, Version::at(Lsn(lsn)), &ctx)
    2287            1 :                 .await?,
    2288              :             RELSEG_SIZE
    2289              :         );
    2290            1 :         assert_current_logical_size(&tline, Lsn(lsn));
    2291              : 
    2292              :         // Truncate another block
    2293            1 :         lsn += 0x10;
    2294            1 :         let mut m = tline.begin_modification(Lsn(lsn));
    2295            1 :         walingest
    2296            1 :             .put_rel_truncation(&mut m, TESTREL_A, RELSEG_SIZE - 1, &ctx)
    2297            1 :             .await?;
    2298            1 :         m.commit(&ctx).await?;
    2299            1 :         assert_eq!(
    2300            1 :             tline
    2301            1 :                 .get_rel_size(TESTREL_A, Version::at(Lsn(lsn)), &ctx)
    2302            1 :                 .await?,
    2303            1 :             RELSEG_SIZE - 1
    2304              :         );
    2305            1 :         assert_current_logical_size(&tline, Lsn(lsn));
    2306              : 
    2307              :         // Truncate to 3000, and then truncate all the way down to 0, one block at a time.
    2308              :         // This tests the behavior at segment boundaries
    2309            1 :         let mut size: i32 = 3000;
    2310         3002 :         while size >= 0 {
    2311         3001 :             lsn += 0x10;
    2312         3001 :             let mut m = tline.begin_modification(Lsn(lsn));
    2313         3001 :             walingest
    2314         3001 :                 .put_rel_truncation(&mut m, TESTREL_A, size as BlockNumber, &ctx)
    2315         3001 :                 .await?;
    2316         3001 :             m.commit(&ctx).await?;
    2317         3001 :             assert_eq!(
    2318         3001 :                 tline
    2319         3001 :                     .get_rel_size(TESTREL_A, Version::at(Lsn(lsn)), &ctx)
    2320         3001 :                     .await?,
    2321         3001 :                 size as BlockNumber
    2322              :             );
    2323              : 
    2324         3001 :             size -= 1;
    2325              :         }
    2326            1 :         assert_current_logical_size(&tline, Lsn(lsn));
    2327              : 
    2328            2 :         Ok(())
    2329            1 :     }
    2330              : 
    2331              :     /// Replay a WAL segment file taken directly from safekeepers.
    2332              :     ///
    2333              :     /// This test is useful for benchmarking since it allows us to profile only
    2334              :     /// the walingest code in a single-threaded executor, and iterate more quickly
    2335              :     /// without waiting for unrelated steps.
    2336              :     #[tokio::test]
    2337            1 :     async fn test_ingest_real_wal() {
    2338              :         use postgres_ffi::WAL_SEGMENT_SIZE;
    2339              :         use postgres_ffi::waldecoder::WalStreamDecoder;
    2340              : 
    2341              :         use crate::tenant::harness::*;
    2342              : 
    2343              :         // Define test data path and constants.
    2344              :         //
    2345              :         // Steps to reconstruct the data, if needed:
    2346              :         // 1. Run the pgbench Python test
    2347              :         // 2. Take the first WAL segment file from a safekeeper
    2348              :         // 3. Compress it using `zstd --long input_file`
    2349              :         // 4. Copy initdb.tar.zst from local_fs_remote_storage
    2350              :         // 5. Grep safekeeper logs for "restart decoder" to get the startpoint
    2351              :         // 6. Run just the decoder from this test to get the endpoint.
    2352              :         //    It's the last LSN the decoder will output.
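                      :         // The `startpoint` and `_endpoint` constants below come from
                      :         // steps 5 and 6 respectively.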
    2353            1 :         let pg_version = PgMajorVersion::PG15; // The test data was generated by pg15
    2354            1 :         let path = "test_data/sk_wal_segment_from_pgbench";
    2355            1 :         let wal_segment_path = format!("{path}/000000010000000000000001.zst");
    2356            1 :         let source_initdb_path = format!("{path}/{INITDB_PATH}");
    2357            1 :         let startpoint = Lsn::from_hex("14AEC08").unwrap();
    2358            1 :         let _endpoint = Lsn::from_hex("1FFFF98").unwrap();
    2359              : 
    2360            1 :         let harness = TenantHarness::create("test_ingest_real_wal").await.unwrap();
    2361            1 :         let span = harness
    2362            1 :             .span()
    2363            1 :             .in_scope(|| info_span!("timeline_span", timeline_id=%TIMELINE_ID));
    2364            1 :         let (tenant, ctx) = harness.load().await;
    2365              : 
    2366            1 :         let remote_initdb_path =
    2367            1 :             remote_initdb_archive_path(&tenant.tenant_shard_id().tenant_id, &TIMELINE_ID);
    2368            1 :         let initdb_path = harness.remote_fs_dir.join(remote_initdb_path.get_path());
    2369              : 
    2370            1 :         std::fs::create_dir_all(initdb_path.parent().unwrap())
    2371            1 :             .expect("creating test dir should work");
    2372            1 :         std::fs::copy(source_initdb_path, initdb_path).expect("copying the initdb.tar.zst works");
    2373              : 
    2374              :         // Bootstrap a real timeline. We can't use create_test_timeline because
    2375              :         // it doesn't create a real checkpoint, and WalIngest::new tries to parse
    2376              :         // the garbage data.
    2377            1 :         let tline = tenant
    2378            1 :             .bootstrap_timeline_test(TIMELINE_ID, pg_version, Some(TIMELINE_ID), &ctx)
    2379            1 :             .await
    2380            1 :             .unwrap();
    2381              : 
    2382              :         // We fully read and decompress this into memory before decoding
    2383              :         // to get a more accurate perf profile of the decoder.
    2384            1 :         let bytes = {
    2385              :             use async_compression::tokio::bufread::ZstdDecoder;
    2386            1 :             let file = tokio::fs::File::open(wal_segment_path).await.unwrap();
    2387            1 :             let reader = tokio::io::BufReader::new(file);
    2388            1 :             let decoder = ZstdDecoder::new(reader);
    2389            1 :             let mut reader = tokio::io::BufReader::new(decoder);
    2390            1 :             let mut buffer = Vec::new();
    2391            1 :             tokio::io::copy_buf(&mut reader, &mut buffer).await.unwrap();
    2392            1 :             buffer
    2393              :         };
    2394              : 
    2395              :         // TODO start a profiler too
    2396            1 :         let started_at = std::time::Instant::now();
    2397              : 
    2398              :         // Initialize walingest
    2399            1 :         let xlogoff: usize = startpoint.segment_offset(WAL_SEGMENT_SIZE);
    2400            1 :         let mut decoder = WalStreamDecoder::new(startpoint, pg_version);
    2401            1 :         let mut walingest = WalIngest::new(tline.as_ref(), startpoint, &ctx)
    2402            1 :             .await
    2403            1 :             .unwrap();
    2404            1 :         let mut modification = tline.begin_modification(startpoint);
    2405            1 :         println!("decoding {} bytes", bytes.len() - xlogoff);
    2406              : 
    2407              :         // Decode and ingest WAL. We process the WAL in chunks because
    2408              :         // that's what happens when we get bytes from safekeepers.
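                      :         // With 50-byte chunks, most records straddle a chunk boundary,
                      :         // so poll_decode() returns None until feed_bytes() has supplied
                      :         // the rest of the record.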
    2409       237343 :         for chunk in bytes[xlogoff..].chunks(50) {
    2410       237343 :             decoder.feed_bytes(chunk);
    2411       310268 :             while let Some((lsn, recdata)) = decoder.poll_decode().unwrap() {
    2412        72925 :                 let interpreted = InterpretedWalRecord::from_bytes_filtered(
    2413        72925 :                     recdata,
    2414        72925 :                     &[*modification.tline.get_shard_identity()],
    2415        72925 :                     lsn,
    2416        72925 :                     modification.tline.pg_version,
    2417        72925 :                 )
    2418        72925 :                 .unwrap()
    2419        72925 :                 .remove(modification.tline.get_shard_identity())
    2420        72925 :                 .unwrap();
    2421              : 
    2422        72925 :                 walingest
    2423        72925 :                     .ingest_record(interpreted, &mut modification, &ctx)
    2424        72925 :                     .instrument(span.clone())
    2425        72925 :                     .await
    2426        72925 :                     .unwrap();
    2427              :             }
    2428       237343 :             modification.commit(&ctx).await.unwrap();
    2429              :         }
    2430              : 
    2431            1 :         let duration = started_at.elapsed();
    2432            1 :         println!("done in {duration:?}");
    2433            1 :     }
    2434              : }
        

Generated by: LCOV version 2.1-beta