LCOV - code coverage report
Current view: top level - pageserver/src - walredo.rs (source / functions)
Test:         2b0730d767f560e20b6748f57465922aa8bb805e.info
Test Date:    2024-09-25 14:04:07

                    Coverage    Total    Hit
Lines:              67.2 %        369    248
Functions:          57.9 %         38     22

            Line data    Source code
       1              : //!
       2              : //! WAL redo. This service runs PostgreSQL in a special wal_redo mode
       3              : //! to apply the given WAL records over an old page image and return the
       4              : //! new page image.
       5              : //!
       6              : //! We rely on Postgres to perform WAL redo for us. We launch a
        7              : //! postgres process in a special "wal redo" mode that's similar to
       8              : //! single-user mode. We then pass the previous page image, if any,
       9              : //! and all the WAL records we want to apply, to the postgres
      10              : //! process. Then we get the page image back. Communication with the
       11              : //! postgres process happens via stdin/stdout.
      12              : //!
      13              : //! See pgxn/neon_walredo/walredoproc.c for the other side of
      14              : //! this communication.
      15              : //!
      16              : //! The Postgres process is assumed to be secure against malicious WAL
       17              : //! records. It achieves this by dropping privileges before replaying
       18              : //! any WAL records, so that even if an attacker hijacks the Postgres
       19              : //! process, they cannot escape from it.
      20              : 
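                       : // The per-request flow over stdin/stdout, sketched at a high level (the
                       : // authoritative message layout lives in pgxn/neon_walredo/walredoproc.c):
                       : //
                       : //     pageserver -> walredo stdin  : base page image, if any
                       : //     pageserver -> walredo stdin  : the WAL records to apply, in LSN order
                       : //     walredo -> pageserver stdout : the reconstructed 8 KiB page image
                       : 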
       21              : /// Process lifecycle and abstraction for the IPC protocol.
      22              : mod process;
      23              : 
      24              : /// Code to apply [`NeonWalRecord`]s.
      25              : pub(crate) mod apply_neon;
      26              : 
      27              : use crate::config::PageServerConf;
      28              : use crate::metrics::{
      29              :     WAL_REDO_BYTES_HISTOGRAM, WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM,
      30              :     WAL_REDO_RECORDS_HISTOGRAM, WAL_REDO_TIME,
      31              : };
      32              : use crate::repository::Key;
      33              : use crate::walrecord::NeonWalRecord;
      34              : use anyhow::Context;
      35              : use bytes::{Bytes, BytesMut};
      36              : use pageserver_api::models::{WalRedoManagerProcessStatus, WalRedoManagerStatus};
      37              : use pageserver_api::shard::TenantShardId;
      38              : use std::future::Future;
      39              : use std::sync::Arc;
      40              : use std::time::Duration;
      41              : use std::time::Instant;
      42              : use tracing::*;
      43              : use utils::lsn::Lsn;
      44              : use utils::sync::gate::GateError;
      45              : use utils::sync::heavier_once_cell;
      46              : 
      47              : /// The real implementation that uses a Postgres process to
      48              : /// perform WAL replay.
      49              : ///
       50              : /// Only one thread can use the process at a time; that is enforced by the
      51              : /// Mutex. In the future, we might want to launch a pool of processes to allow
      52              : /// concurrent replay of multiple records.
      53              : pub struct PostgresRedoManager {
      54              :     tenant_shard_id: TenantShardId,
      55              :     conf: &'static PageServerConf,
      56              :     last_redo_at: std::sync::Mutex<Option<Instant>>,
      57              :     /// We use [`heavier_once_cell`] for
      58              :     ///
      59              :     /// 1. coalescing the lazy spawning of walredo processes ([`ProcessOnceCell::Spawned`])
       60              :     /// 2. preventing new processes from being spawned on [`Self::shutdown`] (=> [`ProcessOnceCell::ManagerShutDown`]).
      61              :     ///
      62              :     /// # Spawning
      63              :     ///
       64              :     /// Redo requests use the once cell to coalesce onto one call to [`process::WalRedoProcess::launch`] (see the sketch after this struct definition).
      65              :     ///
       66              :     /// Notably, requests don't use the [`heavier_once_cell::Guard`] to keep hold of
       67              :     /// their process object; we use [`Arc::clone`] for that.
      68              :     ///
       69              :     /// This is primarily because earlier implementations that didn't use [`heavier_once_cell`]
       70              :     /// had that behavior; it's probably unnecessary.
       71              :     /// The only merit of it is that if one walredo process encounters an error,
       72              :     /// it can be taken out of rotation (using [`heavier_once_cell::Guard::take_and_deinit`])
       73              :     /// and redo retried, thereby starting a new process, while other redo tasks might
       74              :     /// still be using the old redo process. But those other tasks will most likely
       75              :     /// encounter an error as well, and errors are an unexpected condition anyway.
      76              :     /// So, probably we could get rid of the `Arc` in the future.
      77              :     ///
      78              :     /// # Shutdown
      79              :     ///
      80              :     /// See [`Self::launched_processes`].
      81              :     redo_process: heavier_once_cell::OnceCell<ProcessOnceCell>,
      82              : 
      83              :     /// Gate that is entered when launching a walredo process and held open
      84              :     /// until the process has been `kill()`ed and `wait()`ed upon.
      85              :     ///
      86              :     /// Manager shutdown waits for this gate to close after setting the
      87              :     /// [`ProcessOnceCell::ManagerShutDown`] state in [`Self::redo_process`].
      88              :     ///
      89              :     /// This type of usage is a bit unusual because gates usually keep track of
       90              :     /// concurrent operations, e.g., every [`Self::request_redo`] that is in flight.
      91              :     /// But we use it here to keep track of the _processes_ that we have launched,
      92              :     /// which may outlive any individual redo request because
       93              :     /// - we keep the walredo process around until it's quiesced, to amortize spawn cost, and
       94              :     /// - the Arc may be held by multiple concurrent redo requests, so just because
      95              :     ///   you replace the [`Self::redo_process`] cell's content doesn't mean the
      96              :     ///   process gets killed immediately.
      97              :     ///
      98              :     /// We could simplify this by getting rid of the [`Arc`].
      99              :     /// See the comment on [`Self::redo_process`] for more details.
     100              :     launched_processes: utils::sync::gate::Gate,
     101              : }
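                       : 
                       : // A minimal sketch of the coalescing flow described on `redo_process`, using
                       : // only the `heavier_once_cell` calls this file itself relies on; the
                       : // `ProcessOnceCell` wrapper and shutdown handling are elided, and
                       : // `spawn_process` is a hypothetical stand-in for `process::WalRedoProcess::launch`:
                       : //
                       : //     async fn get_or_spawn(cell: &heavier_once_cell::OnceCell<Arc<Process>>) -> Arc<Process> {
                       : //         match cell.get_or_init_detached().await {
                       : //             Ok(guard) => Arc::clone(&*guard), // someone already spawned: share it
                       : //             Err(permit) => {
                       : //                 // we hold the only permit; concurrent callers wait until `set`
                       : //                 let proc = Arc::new(spawn_process());
                       : //                 cell.set(Arc::clone(&proc), permit); // publish under the permit
                       : //                 proc
                       : //             }
                       : //         }
                       : //     }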
     102              : 
     103              : /// See [`PostgresRedoManager::redo_process`].
     104              : enum ProcessOnceCell {
     105              :     Spawned(Arc<Process>),
     106              :     ManagerShutDown,
     107              : }
     108              : 
     109              : struct Process {
     110              :     process: process::WalRedoProcess,
     111              :     /// This field is last in this struct so the guard gets dropped _after_ [`Self::process`].
     112              :     /// (Reminder: dropping [`Self::process`] synchronously sends SIGKILL and then `wait()`s for it to exit).
     113              :     _launched_processes_guard: utils::sync::gate::GateGuard,
     114              : }
     115              : 
     116              : impl std::ops::Deref for Process {
     117              :     type Target = process::WalRedoProcess;
     118              : 
     119           48 :     fn deref(&self) -> &Self::Target {
     120           48 :         &self.process
     121           48 :     }
     122              : }
     123              : 
     124            0 : #[derive(Debug, thiserror::Error)]
     125              : pub enum Error {
     126              :     #[error("cancelled")]
     127              :     Cancelled,
     128              :     #[error(transparent)]
     129              :     Other(#[from] anyhow::Error),
     130              : }
     131              : 
     132              : macro_rules! bail {
     133              :     ($($arg:tt)*) => {
     134              :         return Err($crate::walredo::Error::Other(::anyhow::anyhow!($($arg)*)));
     135              :     }
     136              : }
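                       : 
                       : // For illustration: `bail!("invalid WAL redo request with no records")` expands
                       : // to `return Err(Error::Other(anyhow::anyhow!("invalid WAL redo request with no
                       : // records")))`, as used in `request_redo` below.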
     137              : 
     138              : ///
     139              : /// Public interface of WAL redo manager
     140              : ///
     141              : impl PostgresRedoManager {
     142              :     ///
     143              :     /// Request the WAL redo manager to apply some WAL records
     144              :     ///
     145              :     /// The WAL redo is handled by a separate thread, so this just sends a request
     146              :     /// to the thread and waits for response.
     147              :     ///
     148              :     /// # Cancel-Safety
     149              :     ///
     150              :     /// This method is cancellation-safe.
     151           18 :     pub async fn request_redo(
     152           18 :         &self,
     153           18 :         key: Key,
     154           18 :         lsn: Lsn,
     155           18 :         base_img: Option<(Lsn, Bytes)>,
     156           18 :         records: Vec<(Lsn, NeonWalRecord)>,
     157           18 :         pg_version: u32,
     158           18 :     ) -> Result<Bytes, Error> {
     159           18 :         if records.is_empty() {
     160            0 :             bail!("invalid WAL redo request with no records");
     161           18 :         }
     162           18 : 
     163           18 :         let base_img_lsn = base_img.as_ref().map(|p| p.0).unwrap_or(Lsn::INVALID);
     164           18 :         let mut img = base_img.map(|p| p.1);
     165           18 :         let mut batch_neon = apply_neon::can_apply_in_neon(&records[0].1);
     166           18 :         let mut batch_start = 0;
     167           18 :         for (i, record) in records.iter().enumerate().skip(1) {
     168           18 :             let rec_neon = apply_neon::can_apply_in_neon(&record.1);
     169           18 : 
     170           18 :             if rec_neon != batch_neon {
     171            0 :                 let result = if batch_neon {
     172            0 :                     self.apply_batch_neon(key, lsn, img, &records[batch_start..i])
     173              :                 } else {
     174            0 :                     self.apply_batch_postgres(
     175            0 :                         key,
     176            0 :                         lsn,
     177            0 :                         img,
     178            0 :                         base_img_lsn,
     179            0 :                         &records[batch_start..i],
     180            0 :                         self.conf.wal_redo_timeout,
     181            0 :                         pg_version,
     182            0 :                     )
     183            0 :                     .await
     184              :                 };
     185            0 :                 img = Some(result?);
     186              : 
     187            0 :                 batch_neon = rec_neon;
     188            0 :                 batch_start = i;
     189           18 :             }
     190              :         }
     191              :         // last batch
     192           18 :         if batch_neon {
     193            0 :             self.apply_batch_neon(key, lsn, img, &records[batch_start..])
     194              :         } else {
     195           18 :             self.apply_batch_postgres(
     196           18 :                 key,
     197           18 :                 lsn,
     198           18 :                 img,
     199           18 :                 base_img_lsn,
     200           18 :                 &records[batch_start..],
     201           18 :                 self.conf.wal_redo_timeout,
     202           18 :                 pg_version,
     203           18 :             )
     204           48 :             .await
     205              :         }
     206           18 :     }
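                       : 
                       :     // Batching in `request_redo`, illustrated: for records [P, P, N, N, P], where
                       :     // P must go to the postgres process and N satisfies
                       :     // `apply_neon::can_apply_in_neon`, the loop above dispatches three maximal runs
                       :     // in order: postgres([P, P]), neon([N, N]), postgres([P]), threading each
                       :     // batch's output page through `img` as the next batch's base image.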
     207              : 
     208            0 :     pub fn status(&self) -> WalRedoManagerStatus {
     209            0 :         WalRedoManagerStatus {
     210            0 :             last_redo_at: {
     211            0 :                 let at = *self.last_redo_at.lock().unwrap();
     212            0 :                 at.and_then(|at| {
     213            0 :                     let age = at.elapsed();
     214            0 :                     // map any chrono errors silently to None here
     215            0 :                     chrono::Utc::now().checked_sub_signed(chrono::Duration::from_std(age).ok()?)
     216            0 :                 })
     217            0 :             },
     218            0 :             process: self.redo_process.get().and_then(|p| match &*p {
     219            0 :                 ProcessOnceCell::Spawned(p) => Some(WalRedoManagerProcessStatus { pid: p.id() }),
     220            0 :                 ProcessOnceCell::ManagerShutDown => None,
     221            0 :             }),
     222            0 :         }
     223            0 :     }
     224              : }
     225              : 
     226              : impl PostgresRedoManager {
     227              :     ///
     228              :     /// Create a new PostgresRedoManager.
     229              :     ///
     230           18 :     pub fn new(
     231           18 :         conf: &'static PageServerConf,
     232           18 :         tenant_shard_id: TenantShardId,
     233           18 :     ) -> PostgresRedoManager {
     234           18 :         // The actual process is launched lazily, on first request.
     235           18 :         PostgresRedoManager {
     236           18 :             tenant_shard_id,
     237           18 :             conf,
     238           18 :             last_redo_at: std::sync::Mutex::default(),
     239           18 :             redo_process: heavier_once_cell::OnceCell::default(),
     240           18 :             launched_processes: utils::sync::gate::Gate::default(),
     241           18 :         }
     242           18 :     }
     243              : 
     244              :     /// Shut down the WAL redo manager.
     245              :     ///
     246              :     /// Returns `true` if this call was the one that initiated shutdown.
      247              :     /// If the first caller stops polling, `true` may never be observed by any caller.
     248              :     ///
     249              :     /// After this future completes
     250              :     /// - no redo process is running
     251              :     /// - no new redo process will be spawned
     252              :     /// - redo requests that need walredo process will fail with [`Error::Cancelled`]
     253              :     /// - [`apply_neon`]-only redo requests may still work, but this may change in the future
     254              :     ///
     255              :     /// # Cancel-Safety
     256              :     ///
     257              :     /// This method is cancellation-safe.
     258            0 :     pub async fn shutdown(&self) -> bool {
     259              :         // prevent new processes from being spawned
     260            0 :         let maybe_permit = match self.redo_process.get_or_init_detached().await {
     261            0 :             Ok(guard) => {
     262            0 :                 if matches!(&*guard, ProcessOnceCell::ManagerShutDown) {
     263            0 :                     None
     264              :                 } else {
     265            0 :                     let (proc, permit) = guard.take_and_deinit();
     266            0 :                     drop(proc); // this just drops the Arc, its refcount may not be zero yet
     267            0 :                     Some(permit)
     268              :                 }
     269              :             }
     270            0 :             Err(permit) => Some(permit),
     271              :         };
     272            0 :         let it_was_us = if let Some(permit) = maybe_permit {
     273            0 :             self.redo_process
     274            0 :                 .set(ProcessOnceCell::ManagerShutDown, permit);
     275            0 :             true
     276              :         } else {
     277            0 :             false
     278              :         };
      279              :         // Wait for ongoing requests to drain and for the refcounts of all Arc<WalRedoProcess>
      280              :         // we ever launched to drop to zero; when that happens, the underlying process is
      281              :         // synchronously kill()ed and wait()ed for.
     282            0 :         self.launched_processes.close().await;
     283            0 :         it_was_us
     284            0 :     }
     285              : 
     286              :     /// This type doesn't have its own background task to check for idleness: we
     287              :     /// rely on our owner calling this function periodically in its own housekeeping
     288              :     /// loops.
     289            0 :     pub(crate) fn maybe_quiesce(&self, idle_timeout: Duration) {
     290            0 :         if let Ok(g) = self.last_redo_at.try_lock() {
     291            0 :             if let Some(last_redo_at) = *g {
     292            0 :                 if last_redo_at.elapsed() >= idle_timeout {
     293            0 :                     drop(g);
     294            0 :                     drop(self.redo_process.get().map(|guard| guard.take_and_deinit()));
     295            0 :                 }
     296            0 :             }
     297            0 :         }
     298            0 :     }
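                       : 
                       :     // A sketch of the owner-driven housekeeping described above; the loop shape
                       :     // and the concrete timeout values are assumptions for illustration:
                       :     //
                       :     //     loop {
                       :     //         tokio::time::sleep(Duration::from_secs(10)).await;
                       :     //         redo_manager.maybe_quiesce(Duration::from_secs(60));
                       :     //     }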
     299              : 
     300           24 :     async fn do_with_walredo_process<
     301           24 :         F: FnOnce(Arc<Process>) -> Fut,
     302           24 :         Fut: Future<Output = Result<O, Error>>,
     303           24 :         O,
     304           24 :     >(
     305           24 :         &self,
     306           24 :         pg_version: u32,
     307           24 :         closure: F,
     308           24 :     ) -> Result<O, Error> {
     309           24 :         let proc: Arc<Process> = match self.redo_process.get_or_init_detached().await {
     310            0 :             Ok(guard) => match &*guard {
     311            0 :                 ProcessOnceCell::Spawned(proc) => Arc::clone(proc),
     312              :                 ProcessOnceCell::ManagerShutDown => {
     313            0 :                     return Err(Error::Cancelled);
     314              :                 }
     315              :             },
     316           24 :             Err(permit) => {
     317           24 :                 let start = Instant::now();
     318              :                 // acquire guard before spawning process, so that we don't spawn new processes
     319              :                 // if the gate is already closed.
     320           24 :                 let _launched_processes_guard = match self.launched_processes.enter() {
     321           24 :                     Ok(guard) => guard,
     322            0 :                     Err(GateError::GateClosed) => unreachable!(
     323            0 :                         "shutdown sets the once cell to `ManagerShutDown` state before closing the gate"
     324            0 :                     ),
     325              :                 };
     326           24 :                 let proc = Arc::new(Process {
     327           24 :                     process: process::WalRedoProcess::launch(
     328           24 :                         self.conf,
     329           24 :                         self.tenant_shard_id,
     330           24 :                         pg_version,
     331           24 :                     )
     332           24 :                     .context("launch walredo process")?,
     333           24 :                     _launched_processes_guard,
     334           24 :                 });
     335           24 :                 let duration = start.elapsed();
     336           24 :                 WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM.observe(duration.as_secs_f64());
     337           24 :                 info!(
     338            0 :                     elapsed_ms = duration.as_millis(),
     339            0 :                     pid = proc.id(),
     340            0 :                     "launched walredo process"
     341              :                 );
     342           24 :                 self.redo_process
     343           24 :                     .set(ProcessOnceCell::Spawned(Arc::clone(&proc)), permit);
     344           24 :                 proc
     345              :             }
     346              :         };
     347              : 
      348              :         // Async closures are unstable; once they are available, the closure could borrow &Process instead of cloning the Arc.
     349           48 :         let result = closure(proc.clone()).await;
     350              : 
     351           24 :         if result.is_err() {
     352              :             // Avoid concurrent callers hitting the same issue by taking `proc` out of the rotation.
     353              :             // Note that there may be other tasks concurrent with us that also hold `proc`.
     354              :             // We have to deal with that here.
     355              :             // Also read the doc comment on field `self.redo_process`.
     356              :             //
     357              :             // NB: there may still be other concurrent threads using `proc`.
     358              :             // The last one will send SIGKILL when the underlying Arc reaches refcount 0.
     359              :             //
      360              :             // NB: the drop impl blocks the dropping thread with a wait() system call for
      361              :             // the child process. In some ways the blocking is actually good: if we
      362              :             // deferred the waiting to the background (e.g., by using `tokio::process`),
      363              :             // then, if walredo always failed immediately, we could spawn processes faster
      364              :             // than we can SIGKILL & `wait` for them to exit. By doing it the way we do here,
      365              :             // we limit this runaway risk to at most $num_runtimes * $num_executor_threads.
      366              :             // This probably needs revisiting at some later point.
     367           12 :             match self.redo_process.get() {
     368            0 :                 None => (),
     369           12 :                 Some(guard) => {
     370           12 :                     match &*guard {
     371            0 :                         ProcessOnceCell::ManagerShutDown => {}
     372           12 :                         ProcessOnceCell::Spawned(guard_proc) => {
     373           12 :                             if Arc::ptr_eq(&proc, guard_proc) {
     374           12 :                                 // We're the first to observe an error from `proc`, it's our job to take it out of rotation.
     375           12 :                                 guard.take_and_deinit();
     376           12 :                             } else {
     377            0 :                                 // Another task already spawned another redo process (further up in this method)
     378            0 :                                 // and put it into `redo_process`. Do nothing, our view of the world is behind.
     379            0 :                             }
     380              :                         }
     381              :                     }
     382              :                 }
     383              :             }
     384              :             // The last task that does this `drop()` of `proc` will do a blocking `wait()` syscall.
     385           12 :             drop(proc);
     386           12 :         }
     387              : 
     388           24 :         result
     389           24 :     }
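                       : 
                       :     // Error-path scenario for the code above, illustrated: tasks T1 and T2 hold
                       :     // the same `Arc<Process>` and both fail. T1 reaches the cleanup first; since
                       :     // `Arc::ptr_eq` matches the cell's content, T1 takes the process out of
                       :     // rotation. By the time T2 gets there, the cell is empty (or holds a newly
                       :     // spawned process), so T2 does nothing; whichever task drops the last `Arc`
                       :     // reaps the old process.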
     390              : 
     391              :     ///
     392              :     /// Process one request for WAL redo using wal-redo postgres
     393              :     ///
     394              :     /// # Cancel-Safety
     395              :     ///
     396              :     /// Cancellation safe.
     397              :     #[allow(clippy::too_many_arguments)]
     398           18 :     async fn apply_batch_postgres(
     399           18 :         &self,
     400           18 :         key: Key,
     401           18 :         lsn: Lsn,
     402           18 :         base_img: Option<Bytes>,
     403           18 :         base_img_lsn: Lsn,
     404           18 :         records: &[(Lsn, NeonWalRecord)],
     405           18 :         wal_redo_timeout: Duration,
     406           18 :         pg_version: u32,
     407           18 :     ) -> Result<Bytes, Error> {
     408           18 :         *(self.last_redo_at.lock().unwrap()) = Some(Instant::now());
     409              : 
     410           18 :         let (rel, blknum) = key.to_rel_block().context("invalid record")?;
     411              :         const MAX_RETRY_ATTEMPTS: u32 = 1;
     412           18 :         let mut n_attempts = 0u32;
     413           24 :         loop {
     414           24 :             let base_img = &base_img;
     415           24 :             let closure = |proc: Arc<Process>| async move {
     416           24 :                 let started_at = std::time::Instant::now();
     417              : 
     418              :                 // Relational WAL records are applied using wal-redo-postgres
     419           24 :                 let result = proc
     420           24 :                     .apply_wal_records(rel, blknum, base_img, records, wal_redo_timeout)
     421           48 :                     .await
     422           24 :                     .context("apply_wal_records");
     423           24 : 
     424           24 :                 let duration = started_at.elapsed();
     425           24 : 
     426           24 :                 let len = records.len();
      427           48 :                 let nbytes = records.iter().fold(0, |accumulator, record| {
      428           48 :                     accumulator
     429           48 :                         + match &record.1 {
     430           48 :                             NeonWalRecord::Postgres { rec, .. } => rec.len(),
     431            0 :                             _ => unreachable!("Only PostgreSQL records are accepted in this batch"),
     432              :                         }
     433           48 :                 });
     434           24 : 
     435           24 :                 WAL_REDO_TIME.observe(duration.as_secs_f64());
     436           24 :                 WAL_REDO_RECORDS_HISTOGRAM.observe(len as f64);
     437           24 :                 WAL_REDO_BYTES_HISTOGRAM.observe(nbytes as f64);
     438           24 : 
     439           24 :                 debug!(
     440            0 :                     "postgres applied {} WAL records ({} bytes) in {} us to reconstruct page image at LSN {}",
     441            0 :                     len,
     442            0 :                     nbytes,
     443            0 :                     duration.as_micros(),
     444              :                     lsn
     445              :                 );
     446              : 
     447           24 :                 if let Err(e) = result.as_ref() {
     448           12 :                     error!(
     449            0 :                         "error applying {} WAL records {}..{} ({} bytes) to key {key}, from base image with LSN {} to reconstruct page image at LSN {} n_attempts={}: {:?}",
     450            0 :                         records.len(),
     451           12 :                         records.first().map(|p| p.0).unwrap_or(Lsn(0)),
     452           12 :                         records.last().map(|p| p.0).unwrap_or(Lsn(0)),
     453              :                         nbytes,
     454              :                         base_img_lsn,
     455              :                         lsn,
     456              :                         n_attempts,
     457              :                         e,
     458              :                     );
     459           12 :                 }
     460              : 
     461           24 :                 result.map_err(Error::Other)
     462           24 :             };
     463           48 :             let result = self.do_with_walredo_process(pg_version, closure).await;
     464              : 
     465           24 :             if result.is_ok() && n_attempts != 0 {
     466            0 :                 info!(n_attempts, "retried walredo succeeded");
     467           24 :             }
     468           24 :             n_attempts += 1;
     469           24 :             if n_attempts > MAX_RETRY_ATTEMPTS || result.is_ok() {
     470           18 :                 return result;
     471            6 :             }
     472              :         }
     473           18 :     }
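                       : 
                       :     // Retry arithmetic for the loop above: with MAX_RETRY_ATTEMPTS = 1 there are
                       :     // at most two attempts. n_attempts is 0 on the first try; after a failure it
                       :     // becomes 1, which is not > MAX_RETRY_ATTEMPTS, so we retry once. After the
                       :     // retry, n_attempts is 2 > MAX_RETRY_ATTEMPTS, and the result is returned
                       :     // even if it is an error.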
     474              : 
     475              :     ///
      476              :     /// Process a batch of WAL records using bespoke Neon code.
     477              :     ///
     478            0 :     fn apply_batch_neon(
     479            0 :         &self,
     480            0 :         key: Key,
     481            0 :         lsn: Lsn,
     482            0 :         base_img: Option<Bytes>,
     483            0 :         records: &[(Lsn, NeonWalRecord)],
     484            0 :     ) -> Result<Bytes, Error> {
     485            0 :         let start_time = Instant::now();
     486            0 : 
     487            0 :         let mut page = BytesMut::new();
     488            0 :         if let Some(fpi) = base_img {
      489            0 :             // If a full-page image is provided, use it...
     490            0 :             page.extend_from_slice(&fpi[..]);
     491            0 :         } else {
     492              :             // All the current WAL record types that we can handle require a base image.
     493            0 :             bail!("invalid neon WAL redo request with no base image");
     494              :         }
     495              : 
     496              :         // Apply all the WAL records in the batch
     497            0 :         for (record_lsn, record) in records.iter() {
     498            0 :             self.apply_record_neon(key, &mut page, *record_lsn, record)?;
     499              :         }
     500              :         // Success!
     501            0 :         let duration = start_time.elapsed();
     502            0 :         // FIXME: using the same metric here creates a bimodal distribution by default, and because
     503            0 :         // there could be multiple batch sizes this would be N+1 modal.
     504            0 :         WAL_REDO_TIME.observe(duration.as_secs_f64());
     505            0 : 
     506            0 :         debug!(
     507            0 :             "neon applied {} WAL records in {} us to reconstruct page image at LSN {}",
     508            0 :             records.len(),
     509            0 :             duration.as_micros(),
     510              :             lsn
     511              :         );
     512              : 
     513            0 :         Ok(page.freeze())
     514            0 :     }
     515              : 
     516            0 :     fn apply_record_neon(
     517            0 :         &self,
     518            0 :         key: Key,
     519            0 :         page: &mut BytesMut,
     520            0 :         record_lsn: Lsn,
     521            0 :         record: &NeonWalRecord,
     522            0 :     ) -> anyhow::Result<()> {
     523            0 :         apply_neon::apply_in_neon(record, record_lsn, key, page)?;
     524              : 
     525            0 :         Ok(())
     526            0 :     }
     527              : }
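                       : 
                       : // How a caller might distinguish the two error variants: a sketch, assuming a
                       : // `mgr: PostgresRedoManager` and arguments as in `request_redo`'s signature:
                       : //
                       : //     match mgr.request_redo(key, lsn, base_img, records, pg_version).await {
                       : //         Ok(page) => { /* use the reconstructed page image */ }
                       : //         Err(Error::Cancelled) => { /* the manager is shutting down */ }
                       : //         Err(Error::Other(e)) => { /* genuine walredo failure */ }
                       : //     }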
     528              : 
     529              : #[cfg(test)]
     530              : mod tests {
     531              :     use super::PostgresRedoManager;
     532              :     use crate::repository::Key;
     533              :     use crate::{config::PageServerConf, walrecord::NeonWalRecord};
     534              :     use bytes::Bytes;
     535              :     use pageserver_api::shard::TenantShardId;
     536              :     use std::str::FromStr;
     537              :     use tracing::Instrument;
     538              :     use utils::{id::TenantId, lsn::Lsn};
     539              : 
     540              :     #[tokio::test]
     541            6 :     async fn short_v14_redo() {
     542            6 :         let expected = std::fs::read("test_data/short_v14_redo.page").unwrap();
     543            6 : 
     544            6 :         let h = RedoHarness::new().unwrap();
     545            6 : 
     546            6 :         let page = h
     547            6 :             .manager
     548            6 :             .request_redo(
     549            6 :                 Key {
     550            6 :                     field1: 0,
     551            6 :                     field2: 1663,
     552            6 :                     field3: 13010,
     553            6 :                     field4: 1259,
     554            6 :                     field5: 0,
     555            6 :                     field6: 0,
     556            6 :                 },
     557            6 :                 Lsn::from_str("0/16E2408").unwrap(),
     558            6 :                 None,
     559            6 :                 short_records(),
     560            6 :                 14,
     561            6 :             )
     562            6 :             .instrument(h.span())
     563           12 :             .await
     564            6 :             .unwrap();
     565            6 : 
     566            6 :         assert_eq!(&expected, &*page);
     567            6 :     }
     568              : 
     569              :     #[tokio::test]
     570            6 :     async fn short_v14_fails_for_wrong_key_but_returns_zero_page() {
     571            6 :         let h = RedoHarness::new().unwrap();
     572            6 : 
     573            6 :         let page = h
     574            6 :             .manager
     575            6 :             .request_redo(
     576            6 :                 Key {
     577            6 :                     field1: 0,
     578            6 :                     field2: 1663,
     579            6 :                     // key should be 13010
     580            6 :                     field3: 13130,
     581            6 :                     field4: 1259,
     582            6 :                     field5: 0,
     583            6 :                     field6: 0,
     584            6 :                 },
     585            6 :                 Lsn::from_str("0/16E2408").unwrap(),
     586            6 :                 None,
     587            6 :                 short_records(),
     588            6 :                 14,
     589            6 :             )
     590            6 :             .instrument(h.span())
     591           12 :             .await
     592            6 :             .unwrap();
     593            6 : 
      594            6 :         // TODO: there will be some stderr printout, which is forwarded to tracing and
      595            6 :         // could perhaps be captured, as long as it's in the same thread.
     596            6 :         assert_eq!(page, crate::ZERO_PAGE);
     597            6 :     }
     598              : 
     599              :     #[tokio::test]
     600            6 :     async fn test_stderr() {
     601            6 :         let h = RedoHarness::new().unwrap();
     602            6 :         h
     603            6 :             .manager
     604            6 :             .request_redo(
     605            6 :                 Key::from_i128(0),
     606            6 :                 Lsn::INVALID,
     607            6 :                 None,
     608            6 :                 short_records(),
     609            6 :                 16, /* 16 currently produces stderr output on startup, which adds a nice extra edge */
     610            6 :             )
     611            6 :             .instrument(h.span())
     612           24 :             .await
     613            6 :             .unwrap_err();
     614            6 :     }
     615              : 
     616              :     #[allow(clippy::octal_escapes)]
     617           18 :     fn short_records() -> Vec<(Lsn, NeonWalRecord)> {
     618           18 :         vec![
     619           18 :             (
     620           18 :                 Lsn::from_str("0/16A9388").unwrap(),
     621           18 :                 NeonWalRecord::Postgres {
     622           18 :                     will_init: true,
     623           18 :                     rec: Bytes::from_static(b"j\x03\0\0\0\x04\0\0\xe8\x7fj\x01\0\0\0\0\0\n\0\0\xd0\x16\x13Y\0\x10\0\04\x03\xd4\0\x05\x7f\x06\0\0\xd22\0\0\xeb\x04\0\0\0\0\0\0\xff\x03\0\0\0\0\x80\xeca\x01\0\0\x01\0\xd4\0\xa0\x1d\0 \x04 \0\0\0\0/\0\x01\0\xa0\x9dX\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0.\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\00\x9f\x9a\x01P\x9e\xb2\x01\0\x04\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x02\0!\0\x01\x08 \xff\xff\xff?\0\0\0\0\0\0@\0\0another_table\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x98\x08\0\0\x02@\0\0\0\0\0\0\n\0\0\0\x02\0\0\0\0@\0\0\0\0\0\0\0\0\0\0\0\0\x80\xbf\0\0\0\0\0\0\0\0\0\0pr\x01\0\0\0\0\0\0\0\0\x01d\0\0\0\0\0\0\x04\0\0\x01\0\0\0\0\0\0\0\x0c\x02\0\0\0\0\0\0\0\0\0\0\0\0\0\0/\0!\x80\x03+ \xff\xff\xff\x7f\0\0\0\0\0\xdf\x04\0\0pg_type\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x0b\0\0\0G\0\0\0\0\0\0\0\n\0\0\0\x02\0\0\0\0\0\0\0\0\0\0\0\x0e\0\0\0\0@\x16D\x0e\0\0\0K\x10\0\0\x01\0pr \0\0\0\0\0\0\0\0\x01n\0\0\0\0\0\xd6\x02\0\0\x01\0\0\0[\x01\0\0\0\0\0\0\0\t\x04\0\0\x02\0\0\0\x01\0\0\0\n\0\0\0\n\0\0\0\x7f\0\0\0\0\0\0\0\n\0\0\0\x02\0\0\0\0\0\0C\x01\0\0\x15\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0.\0!\x80\x03+ \xff\xff\xff\x7f\0\0\0\0\0;\n\0\0pg_statistic\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x0b\0\0\0\xfd.\0\0\0\0\0\0\n\0\0\0\x02\0\0\0;\n\0\0\0\0\0\0\x13\0\0\0\0\0\xcbC\x13\0\0\0\x18\x0b\0\0\x01\0pr\x1f\0\0\0\0\0\0\0\0\x01n\0\0\0\0\0\xd6\x02\0\0\x01\0\0\0C\x01\0\0\0\0\0\0\0\t\x04\0\0\x01\0\0\0\x01\0\0\0\n\0\0\0\n\0\0\0\x7f\0\0\0\0\0\0\x02\0\x01")
     624           18 :                 }
     625           18 :             ),
     626           18 :             (
     627           18 :                 Lsn::from_str("0/16D4080").unwrap(),
     628           18 :                 NeonWalRecord::Postgres {
     629           18 :                     will_init: false,
     630           18 :                     rec: Bytes::from_static(b"\xbc\0\0\0\0\0\0\0h?m\x01\0\0\0\0p\n\0\09\x08\xa3\xea\0 \x8c\0\x7f\x06\0\0\xd22\0\0\xeb\x04\0\0\0\0\0\0\xff\x02\0@\0\0another_table\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x98\x08\0\0\x02@\0\0\0\0\0\0\n\0\0\0\x02\0\0\0\0@\0\0\0\0\0\0\x05\0\0\0\0@zD\x05\0\0\0\0\0\0\0\0\0pr\x01\0\0\0\0\0\0\0\0\x01d\0\0\0\0\0\0\x04\0\0\x01\0\0\0\x02\0")
     631           18 :                 }
     632           18 :             )
     633           18 :         ]
     634           18 :     }
     635              : 
     636              :     struct RedoHarness {
     637              :         // underscored because unused, except for removal at drop
     638              :         _repo_dir: camino_tempfile::Utf8TempDir,
     639              :         manager: PostgresRedoManager,
     640              :         tenant_shard_id: TenantShardId,
     641              :     }
     642              : 
     643              :     impl RedoHarness {
     644           18 :         fn new() -> anyhow::Result<Self> {
     645           18 :             crate::tenant::harness::setup_logging();
     646              : 
     647           18 :             let repo_dir = camino_tempfile::tempdir()?;
     648           18 :             let conf = PageServerConf::dummy_conf(repo_dir.path().to_path_buf());
     649           18 :             let conf = Box::leak(Box::new(conf));
     650           18 :             let tenant_shard_id = TenantShardId::unsharded(TenantId::generate());
     651           18 : 
     652           18 :             let manager = PostgresRedoManager::new(conf, tenant_shard_id);
     653           18 : 
     654           18 :             Ok(RedoHarness {
     655           18 :                 _repo_dir: repo_dir,
     656           18 :                 manager,
     657           18 :                 tenant_shard_id,
     658           18 :             })
     659           18 :         }
     660           18 :         fn span(&self) -> tracing::Span {
     661           18 :             tracing::info_span!("RedoHarness", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug())
     662           18 :         }
     663              :     }
     664              : }
        

Generated by: LCOV version 2.1-beta