//!
//! WAL redo. This service runs PostgreSQL in a special wal_redo mode
//! to apply the given WAL records over an old page image and return the
//! new page image.
//!
//! We rely on Postgres to perform WAL redo for us. We launch a
//! postgres process in a special "wal redo" mode that's similar to
//! single-user mode. We then pass the previous page image, if any,
//! and all the WAL records we want to apply, to the postgres
//! process. Then we get the page image back. Communication with the
//! postgres process happens via stdin/stdout.
//!
//! See pgxn/neon_walredo/walredoproc.c for the other side of
//! this communication.
//!
//! The Postgres process is assumed to be secure against malicious WAL
//! records. It achieves this by dropping privileges before replaying
//! any WAL records, so that even if an attacker hijacks the Postgres
//! process, they cannot escape out of it.
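//!
//! As a rough sketch of how this module is driven (modeled on the tests at
//! the bottom of this file; `conf`, `tenant_shard_id`, `key`, `lsn`, and
//! `records` are placeholders the caller supplies):
//!
//! ```ignore
//! let manager = PostgresRedoManager::new(conf, tenant_shard_id);
//! let page = manager
//!     .request_redo(
//!         key,                       // page to reconstruct
//!         lsn,                       // reconstruct the page as of this LSN
//!         None,                      // optional base image: Option<(Lsn, Bytes)>
//!         records,                   // WAL records: Vec<(Lsn, NeonWalRecord)>
//!         14,                        // PostgreSQL major version
//!         RedoAttemptType::ReadPage,
//!     )
//!     .await?;
//! ```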

/// Process lifecycle and abstraction for the IPC protocol.
mod process;

/// Code to apply [`NeonWalRecord`]s.
pub(crate) mod apply_neon;

use std::future::Future;
use std::sync::Arc;
use std::time::{Duration, Instant};

use anyhow::Context;
use bytes::{Bytes, BytesMut};
use pageserver_api::key::Key;
use pageserver_api::models::{WalRedoManagerProcessStatus, WalRedoManagerStatus};
use pageserver_api::record::NeonWalRecord;
use pageserver_api::shard::TenantShardId;
use tracing::*;
use utils::lsn::Lsn;
use utils::sync::gate::GateError;
use utils::sync::heavier_once_cell;

use crate::config::PageServerConf;
use crate::metrics::{
    WAL_REDO_BYTES_HISTOGRAM, WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM,
    WAL_REDO_RECORDS_HISTOGRAM, WAL_REDO_TIME,
};

/// The real implementation that uses a Postgres process to
/// perform WAL replay.
///
/// Only one thread can use the process at a time; that is controlled by the
/// Mutex. In the future, we might want to launch a pool of processes to allow
/// concurrent replay of multiple records.
pub struct PostgresRedoManager {
    tenant_shard_id: TenantShardId,
    conf: &'static PageServerConf,
    last_redo_at: std::sync::Mutex<Option<Instant>>,
    /// We use [`heavier_once_cell`] for
    ///
    /// 1. coalescing the lazy spawning of walredo processes ([`ProcessOnceCell::Spawned`])
    /// 2. preventing new processes from being spawned on [`Self::shutdown`] (=> [`ProcessOnceCell::ManagerShutDown`]).
    ///
    /// # Spawning
    ///
    /// Redo requests use the once cell to coalesce onto one call to [`process::WalRedoProcess::launch`].
    ///
    /// Notably, requests don't use the [`heavier_once_cell::Guard`] to keep hold of
    /// their process object; we use [`Arc::clone`] for that.
    ///
    /// This is primarily because earlier implementations that didn't use [`heavier_once_cell`]
    /// had that behavior; it's probably unnecessary.
    /// The only merit of it is that if one walredo process encounters an error,
    /// it can take it out of rotation (using [`heavier_once_cell::Guard::take_and_deinit`])
    /// and retry redo, thereby starting a new process, while other redo tasks might
    /// still be using the old redo process. But those other tasks will most likely
    /// encounter an error as well, and errors are an unexpected condition anyway.
    /// So, we could probably get rid of the `Arc` in the future.
    ///
    /// # Shutdown
    ///
    /// See [`Self::launched_processes`].
    redo_process: heavier_once_cell::OnceCell<ProcessOnceCell>,

    /// Gate that is entered when launching a walredo process and held open
    /// until the process has been `kill()`ed and `wait()`ed upon.
    ///
    /// Manager shutdown waits for this gate to close after setting the
    /// [`ProcessOnceCell::ManagerShutDown`] state in [`Self::redo_process`].
    ///
    /// This type of usage is a bit unusual because gates usually keep track of
    /// concurrent operations, e.g., every [`Self::request_redo`] that is inflight.
    /// But we use it here to keep track of the _processes_ that we have launched,
    /// which may outlive any individual redo request because
    /// - we keep the walredo process around until it's quiesced to amortize spawn cost, and
    /// - the Arc may be held by multiple concurrent redo requests, so just because
    ///   you replace the [`Self::redo_process`] cell's content doesn't mean the
    ///   process gets killed immediately.
    ///
    /// We could simplify this by getting rid of the [`Arc`].
    /// See the comment on [`Self::redo_process`] for more details.
    launched_processes: utils::sync::gate::Gate,
}

/// See [`PostgresRedoManager::redo_process`].
enum ProcessOnceCell {
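    /// A launched walredo process, shared by concurrent redo requests via `Arc`.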
    Spawned(Arc<Process>),
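    /// Set by [`PostgresRedoManager::shutdown`]; prevents new processes from being spawned.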
    ManagerShutDown,
}
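
/// Couples a spawned [`process::WalRedoProcess`] with the gate guard that keeps
/// [`PostgresRedoManager::launched_processes`] open for as long as the process lives.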
struct Process {
    process: process::WalRedoProcess,
    /// This field is last in this struct so the guard gets dropped _after_ [`Self::process`].
    /// (Reminder: dropping [`Self::process`] synchronously sends SIGKILL and then `wait()`s for it to exit.)
    _launched_processes_guard: utils::sync::gate::GateGuard,
}

impl std::ops::Deref for Process {
    type Target = process::WalRedoProcess;

    fn deref(&self) -> &Self::Target {
        &self.process
    }
}

#[derive(Debug, thiserror::Error)]
pub enum Error {
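    /// The manager is shutting down (or has shut down); see [`PostgresRedoManager::shutdown`].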
    #[error("cancelled")]
    Cancelled,
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}
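
/// Convenience macro, analogous to `anyhow::bail!`: wraps the formatted message
/// in [`Error::Other`] and returns it from the enclosing function.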
macro_rules! bail {
    ($($arg:tt)*) => {
        return Err($crate::walredo::Error::Other(::anyhow::anyhow!($($arg)*)));
    }
}

#[derive(Debug, Clone, Copy)]
pub enum RedoAttemptType {
    /// Used for the read path. Fires critical errors and retries twice on failure.
    ReadPage,
    /// Used for legacy compaction (only used in image compaction). Fires critical errors and retries once on failure.
    LegacyCompaction,
    /// Used for gc compaction. Does not fire critical errors and does not retry.
    GcCompaction,
}

///
/// Public interface of WAL redo manager
///
impl PostgresRedoManager {
    ///
    /// Request the WAL redo manager to apply some WAL records
    ///
    /// The WAL redo is handled by a separate thread, so this just sends a request
    /// to the thread and waits for the response.
    ///
    /// # Cancel-Safety
    ///
    /// This method is cancellation-safe.
    pub async fn request_redo(
        &self,
        key: Key,
        lsn: Lsn,
        base_img: Option<(Lsn, Bytes)>,
        records: Vec<(Lsn, NeonWalRecord)>,
        pg_version: u32,
        redo_attempt_type: RedoAttemptType,
    ) -> Result<Bytes, Error> {
        if records.is_empty() {
            bail!("invalid WAL redo request with no records");
        }

        let max_retry_attempts = match redo_attempt_type {
            RedoAttemptType::ReadPage => 2,
            RedoAttemptType::LegacyCompaction => 1,
            RedoAttemptType::GcCompaction => 0,
        };

        let base_img_lsn = base_img.as_ref().map(|p| p.0).unwrap_or(Lsn::INVALID);
        let mut img = base_img.map(|p| p.1);
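        // Split the records into batches of consecutive records that are handled
        // by the same backend: records that `apply_neon` can handle natively vs.
        // records that must go to the walredo process. The page image produced by
        // each batch serves as the base image for the next one.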
        let mut batch_neon = apply_neon::can_apply_in_neon(&records[0].1);
        let mut batch_start = 0;
        for (i, record) in records.iter().enumerate().skip(1) {
            let rec_neon = apply_neon::can_apply_in_neon(&record.1);

            if rec_neon != batch_neon {
                let result = if batch_neon {
                    self.apply_batch_neon(key, lsn, img, &records[batch_start..i])
                } else {
                    self.apply_batch_postgres(
                        key,
                        lsn,
                        img,
                        base_img_lsn,
                        &records[batch_start..i],
                        self.conf.wal_redo_timeout,
                        pg_version,
                        max_retry_attempts,
                    )
                    .await
                };
                img = Some(result?);

                batch_neon = rec_neon;
                batch_start = i;
            }
        }
        // last batch
        if batch_neon {
            self.apply_batch_neon(key, lsn, img, &records[batch_start..])
        } else {
            self.apply_batch_postgres(
                key,
                lsn,
                img,
                base_img_lsn,
                &records[batch_start..],
                self.conf.wal_redo_timeout,
                pg_version,
                max_retry_attempts,
            )
            .await
        }
    }

    /// Do a ping request-response roundtrip.
    ///
    /// Not used in production, but by Rust benchmarks.
    ///
    /// # Cancel-Safety
    ///
    /// This method is cancellation-safe.
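    ///
    /// A sketch of a call site (modeled on the `test_ping` test below; the
    /// manager setup is elided):
    ///
    /// ```ignore
    /// manager.ping(14).await.expect("ping should work");
    /// ```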
    pub async fn ping(&self, pg_version: u32) -> Result<(), Error> {
        self.do_with_walredo_process(pg_version, |proc| async move {
            proc.ping(Duration::from_secs(1))
                .await
                .map_err(Error::Other)
        })
        .await
    }

    pub fn status(&self) -> WalRedoManagerStatus {
        WalRedoManagerStatus {
            last_redo_at: {
                let at = *self.last_redo_at.lock().unwrap();
                at.and_then(|at| {
                    let age = at.elapsed();
                    // map any chrono errors silently to None here
                    chrono::Utc::now().checked_sub_signed(chrono::Duration::from_std(age).ok()?)
                })
            },
            process: self.redo_process.get().and_then(|p| match &*p {
                ProcessOnceCell::Spawned(p) => Some(WalRedoManagerProcessStatus { pid: p.id() }),
                ProcessOnceCell::ManagerShutDown => None,
            }),
        }
    }
}

impl PostgresRedoManager {
    ///
    /// Create a new PostgresRedoManager.
    ///
    pub fn new(
        conf: &'static PageServerConf,
        tenant_shard_id: TenantShardId,
    ) -> PostgresRedoManager {
        // The actual process is launched lazily, on first request.
        PostgresRedoManager {
            tenant_shard_id,
            conf,
            last_redo_at: std::sync::Mutex::default(),
            redo_process: heavier_once_cell::OnceCell::default(),
            launched_processes: utils::sync::gate::Gate::default(),
        }
    }

    /// Shut down the WAL redo manager.
    ///
    /// Returns `true` if this call was the one that initiated shutdown.
    /// Note that no caller may ever observe `true` if the initiating caller stops polling.
    ///
    /// After this future completes:
    /// - no redo process is running
    /// - no new redo process will be spawned
    /// - redo requests that need a walredo process will fail with [`Error::Cancelled`]
    /// - [`apply_neon`]-only redo requests may still work, but this may change in the future
    ///
    /// # Cancel-Safety
    ///
    /// This method is cancellation-safe.
    pub async fn shutdown(&self) -> bool {
        // prevent new processes from being spawned
        let maybe_permit = match self.redo_process.get_or_init_detached().await {
            Ok(guard) => {
                if matches!(&*guard, ProcessOnceCell::ManagerShutDown) {
                    None
                } else {
                    let (proc, permit) = guard.take_and_deinit();
                    drop(proc); // this just drops the Arc, its refcount may not be zero yet
                    Some(permit)
                }
            }
            Err(permit) => Some(permit),
        };
        let it_was_us = if let Some(permit) = maybe_permit {
            self.redo_process
                .set(ProcessOnceCell::ManagerShutDown, permit);
            true
        } else {
            false
        };
        // Wait for ongoing requests to drain and for the refcounts of all Arc<WalRedoProcess>
        // that we ever launched to drop to zero; when that happens, the underlying process is
        // synchronously kill()ed and wait()ed upon.
        self.launched_processes.close().await;
        it_was_us
    }

    /// This type doesn't have its own background task to check for idleness: we
    /// rely on our owner calling this function periodically in its own housekeeping
    /// loops.
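    ///
    /// An owner's housekeeping loop might drive this roughly as follows (an
    /// illustrative sketch; the interval and timeout values are made up):
    ///
    /// ```ignore
    /// loop {
    ///     tokio::time::sleep(Duration::from_secs(10)).await;
    ///     redo_manager.maybe_quiesce(Duration::from_secs(60));
    /// }
    /// ```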
    pub(crate) fn maybe_quiesce(&self, idle_timeout: Duration) {
        if let Ok(g) = self.last_redo_at.try_lock() {
            if let Some(last_redo_at) = *g {
                if last_redo_at.elapsed() >= idle_timeout {
                    drop(g);
                    drop(self.redo_process.get().map(|guard| guard.take_and_deinit()));
                }
            }
        }
    }

    /// # Cancel-Safety
    ///
    /// This method is cancel-safe iff `closure` is cancel-safe.
    async fn do_with_walredo_process<
        F: FnOnce(Arc<Process>) -> Fut,
        Fut: Future<Output = Result<O, Error>>,
        O,
    >(
        &self,
        pg_version: u32,
        closure: F,
    ) -> Result<O, Error> {
        let proc: Arc<Process> = match self.redo_process.get_or_init_detached().await {
            Ok(guard) => match &*guard {
                ProcessOnceCell::Spawned(proc) => Arc::clone(proc),
                ProcessOnceCell::ManagerShutDown => {
                    return Err(Error::Cancelled);
                }
            },
            Err(permit) => {
                let start = Instant::now();
                // acquire guard before spawning process, so that we don't spawn new processes
                // if the gate is already closed.
                let _launched_processes_guard = match self.launched_processes.enter() {
                    Ok(guard) => guard,
                    Err(GateError::GateClosed) => unreachable!(
                        "shutdown sets the once cell to `ManagerShutDown` state before closing the gate"
                    ),
                };
                let proc = Arc::new(Process {
                    process: process::WalRedoProcess::launch(
                        self.conf,
                        self.tenant_shard_id,
                        pg_version,
                    )
                    .context("launch walredo process")?,
                    _launched_processes_guard,
                });
                let duration = start.elapsed();
                WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM.observe(duration.as_secs_f64());
                info!(
                    elapsed_ms = duration.as_millis(),
                    pid = proc.id(),
                    "launched walredo process"
                );
                self.redo_process
                    .set(ProcessOnceCell::Spawned(Arc::clone(&proc)), permit);
                proc
            }
        };

        // async closures are unstable; if they were stable, the closure could take `&Process`
        let result = closure(proc.clone()).await;

        if result.is_err() {
            // Avoid concurrent callers hitting the same issue by taking `proc` out of the rotation.
            // Note that there may be other tasks concurrent with us that also hold `proc`.
            // We have to deal with that here.
            // Also read the doc comment on field `self.redo_process`.
            //
            // NB: there may still be other concurrent threads using `proc`.
            // The last one will send SIGKILL when the underlying Arc reaches refcount 0.
            //
            // NB: the drop impl blocks the dropping thread with a wait() system call for
            // the child process. In some ways the blocking is actually good: if we
            // deferred the waiting into the background / to tokio by using `tokio::process`,
            // it could happen that if walredo always fails immediately, we spawn processes faster
            // than we can SIGKILL & `wait` for them to exit. By doing it the way we do here,
            // we limit this risk of run-away to at most $num_runtimes * $num_executor_threads.
            // This probably needs revisiting at some later point.
            match self.redo_process.get() {
                None => (),
                Some(guard) => {
                    match &*guard {
                        ProcessOnceCell::ManagerShutDown => {}
                        ProcessOnceCell::Spawned(guard_proc) => {
                            if Arc::ptr_eq(&proc, guard_proc) {
                                // We're the first to observe an error from `proc`; it's our job to take it out of rotation.
                                guard.take_and_deinit();
                            } else {
                                // Another task already spawned another redo process (further up in this method)
                                // and put it into `redo_process`. Do nothing, our view of the world is behind.
                            }
                        }
                    }
                }
            }
            // The last task that does this `drop()` of `proc` will do a blocking `wait()` syscall.
            drop(proc);
        }

        result
    }

    ///
    /// Process one request for WAL redo using wal-redo postgres
    ///
    /// # Cancel-Safety
    ///
    /// Cancellation safe.
    #[allow(clippy::too_many_arguments)]
    async fn apply_batch_postgres(
        &self,
        key: Key,
        lsn: Lsn,
        base_img: Option<Bytes>,
        base_img_lsn: Lsn,
        records: &[(Lsn, NeonWalRecord)],
        wal_redo_timeout: Duration,
        pg_version: u32,
        max_retry_attempts: u32,
    ) -> Result<Bytes, Error> {
        *(self.last_redo_at.lock().unwrap()) = Some(Instant::now());

        let (rel, blknum) = key.to_rel_block().context("invalid record")?;
        let mut n_attempts = 0u32;
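        // Retry loop: when an attempt fails, `do_with_walredo_process` has already
        // taken the erroring process out of rotation, so the next iteration spawns
        // (or coalesces onto) a fresh walredo process.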
        loop {
            let base_img = &base_img;
            let closure = |proc: Arc<Process>| async move {
                let started_at = std::time::Instant::now();

                // Relational WAL records are applied using wal-redo-postgres
                let result = proc
                    .apply_wal_records(rel, blknum, base_img, records, wal_redo_timeout)
                    .await
                    .context("apply_wal_records");

                let duration = started_at.elapsed();

                let len = records.len();
                let nbytes = records.iter().fold(0, |accumulator, record| {
                    accumulator
                        + match &record.1 {
                            NeonWalRecord::Postgres { rec, .. } => rec.len(),
                            _ => unreachable!("Only PostgreSQL records are accepted in this batch"),
                        }
                });

                WAL_REDO_TIME.observe(duration.as_secs_f64());
                WAL_REDO_RECORDS_HISTOGRAM.observe(len as f64);
                WAL_REDO_BYTES_HISTOGRAM.observe(nbytes as f64);

                debug!(
                    "postgres applied {} WAL records ({} bytes) in {} us to reconstruct page image at LSN {}",
                    len,
                    nbytes,
                    duration.as_micros(),
                    lsn
                );

                if let Err(e) = result.as_ref() {
                    error!(
                        "error applying {} WAL records {}..{} ({} bytes) to key {key}, from base image with LSN {} to reconstruct page image at LSN {} n_attempts={}: {:?}",
                        records.len(),
                        records.first().map(|p| p.0).unwrap_or(Lsn(0)),
                        records.last().map(|p| p.0).unwrap_or(Lsn(0)),
                        nbytes,
                        base_img_lsn,
                        lsn,
                        n_attempts,
                        e,
                    );
                }

                result.map_err(Error::Other)
            };
            let result = self.do_with_walredo_process(pg_version, closure).await;

            if result.is_ok() && n_attempts != 0 {
                info!(n_attempts, "retried walredo succeeded");
            }
            n_attempts += 1;
            if n_attempts > max_retry_attempts || result.is_ok() {
                return result;
            }
        }
    }

    ///
    /// Process a batch of WAL records using bespoke Neon code.
    ///
    fn apply_batch_neon(
        &self,
        key: Key,
        lsn: Lsn,
        base_img: Option<Bytes>,
        records: &[(Lsn, NeonWalRecord)],
    ) -> Result<Bytes, Error> {
        let start_time = Instant::now();

        let mut page = BytesMut::new();
        if let Some(fpi) = base_img {
            // If a full-page image is provided, then use it...
            page.extend_from_slice(&fpi[..]);
        } else {
            // All the current WAL record types that we can handle require a base image.
            bail!("invalid neon WAL redo request with no base image");
        }

        // Apply all the WAL records in the batch
        for (record_lsn, record) in records.iter() {
            self.apply_record_neon(key, &mut page, *record_lsn, record)?;
        }
        // Success!
        let duration = start_time.elapsed();
        // FIXME: using the same metric here creates a bimodal distribution by default, and because
        // there could be multiple batch sizes this would be N+1 modal.
        WAL_REDO_TIME.observe(duration.as_secs_f64());

        debug!(
            "neon applied {} WAL records in {} us to reconstruct page image at LSN {}",
            records.len(),
            duration.as_micros(),
            lsn
        );

        Ok(page.freeze())
    }

    fn apply_record_neon(
        &self,
        key: Key,
        page: &mut BytesMut,
        record_lsn: Lsn,
        record: &NeonWalRecord,
    ) -> anyhow::Result<()> {
        apply_neon::apply_in_neon(record, record_lsn, key, page)?;

        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use std::str::FromStr;

    use bytes::Bytes;
    use pageserver_api::key::Key;
    use pageserver_api::record::NeonWalRecord;
    use pageserver_api::shard::TenantShardId;
    use tracing::Instrument;
    use utils::id::TenantId;
    use utils::lsn::Lsn;

    use super::PostgresRedoManager;
    use crate::config::PageServerConf;
    use crate::walredo::RedoAttemptType;

    #[tokio::test]
    async fn test_ping() {
        let h = RedoHarness::new().unwrap();

        h.manager
            .ping(14)
            .instrument(h.span())
            .await
            .expect("ping should work");
    }

    #[tokio::test]
    async fn short_v14_redo() {
        let expected = std::fs::read("test_data/short_v14_redo.page").unwrap();

        let h = RedoHarness::new().unwrap();

        let page = h
            .manager
            .request_redo(
                Key {
                    field1: 0,
                    field2: 1663,
                    field3: 13010,
                    field4: 1259,
                    field5: 0,
                    field6: 0,
                },
                Lsn::from_str("0/16E2408").unwrap(),
                None,
                short_records(),
                14,
                RedoAttemptType::ReadPage,
            )
            .instrument(h.span())
            .await
            .unwrap();

        assert_eq!(&expected, &*page);
    }

    #[tokio::test]
    async fn short_v14_fails_for_wrong_key_but_returns_zero_page() {
        let h = RedoHarness::new().unwrap();

        let page = h
            .manager
            .request_redo(
                Key {
                    field1: 0,
                    field2: 1663,
                    // key should be 13010
                    field3: 13130,
                    field4: 1259,
                    field5: 0,
                    field6: 0,
                },
                Lsn::from_str("0/16E2408").unwrap(),
                None,
                short_records(),
                14,
                RedoAttemptType::ReadPage,
            )
            .instrument(h.span())
            .await
            .unwrap();

        // TODO: there will be some stderr printout, which is forwarded to tracing that could
        // perhaps be captured as long as it's in the same thread.
        assert_eq!(page, crate::ZERO_PAGE);
    }

    #[tokio::test]
    async fn test_stderr() {
        let h = RedoHarness::new().unwrap();
        h.manager
            .request_redo(
                Key::from_i128(0),
                Lsn::INVALID,
                None,
                short_records(),
                16, /* 16 currently produces stderr output on startup, which adds a nice extra edge */
                RedoAttemptType::ReadPage,
            )
            .instrument(h.span())
            .await
            .unwrap_err();
    }

    #[allow(clippy::octal_escapes)]
    fn short_records() -> Vec<(Lsn, NeonWalRecord)> {
        vec![
            (
                Lsn::from_str("0/16A9388").unwrap(),
                NeonWalRecord::Postgres {
                    will_init: true,
                    rec: Bytes::from_static(b"j\x03\0\0\0\x04\0\0\xe8\x7fj\x01\0\0\0\0\0\n\0\0\xd0\x16\x13Y\0\x10\0\04\x03\xd4\0\x05\x7f\x06\0\0\xd22\0\0\xeb\x04\0\0\0\0\0\0\xff\x03\0\0\0\0\x80\xeca\x01\0\0\x01\0\xd4\0\xa0\x1d\0 \x04 \0\0\0\0/\0\x01\0\xa0\x9dX\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0.\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\00\x9f\x9a\x01P\x9e\xb2\x01\0\x04\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x02\0!\0\x01\x08 \xff\xff\xff?\0\0\0\0\0\0@\0\0another_table\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x98\x08\0\0\x02@\0\0\0\0\0\0\n\0\0\0\x02\0\0\0\0@\0\0\0\0\0\0\0\0\0\0\0\0\x80\xbf\0\0\0\0\0\0\0\0\0\0pr\x01\0\0\0\0\0\0\0\0\x01d\0\0\0\0\0\0\x04\0\0\x01\0\0\0\0\0\0\0\x0c\x02\0\0\0\0\0\0\0\0\0\0\0\0\0\0/\0!\x80\x03+ \xff\xff\xff\x7f\0\0\0\0\0\xdf\x04\0\0pg_type\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x0b\0\0\0G\0\0\0\0\0\0\0\n\0\0\0\x02\0\0\0\0\0\0\0\0\0\0\0\x0e\0\0\0\0@\x16D\x0e\0\0\0K\x10\0\0\x01\0pr \0\0\0\0\0\0\0\0\x01n\0\0\0\0\0\xd6\x02\0\0\x01\0\0\0[\x01\0\0\0\0\0\0\0\t\x04\0\0\x02\0\0\0\x01\0\0\0\n\0\0\0\n\0\0\0\x7f\0\0\0\0\0\0\0\n\0\0\0\x02\0\0\0\0\0\0C\x01\0\0\x15\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0.\0!\x80\x03+ \xff\xff\xff\x7f\0\0\0\0\0;\n\0\0pg_statistic\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x0b\0\0\0\xfd.\0\0\0\0\0\0\n\0\0\0\x02\0\0\0;\n\0\0\0\0\0\0\x13\0\0\0\0\0\xcbC\x13\0\0\0\x18\x0b\0\0\x01\0pr\x1f\0\0\0\0\0\0\0\0\x01n\0\0\0\0\0\xd6\x02\0\0\x01\0\0\0C\x01\0\0\0\0\0\0\0\t\x04\0\0\x01\0\0\0\x01\0\0\0\n\0\0\0\n\0\0\0\x7f\0\0\0\0\0\0\x02\0\x01")
                }
            ),
            (
                Lsn::from_str("0/16D4080").unwrap(),
                NeonWalRecord::Postgres {
                    will_init: false,
                    rec: Bytes::from_static(b"\xbc\0\0\0\0\0\0\0h?m\x01\0\0\0\0p\n\0\09\x08\xa3\xea\0 \x8c\0\x7f\x06\0\0\xd22\0\0\xeb\x04\0\0\0\0\0\0\xff\x02\0@\0\0another_table\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x98\x08\0\0\x02@\0\0\0\0\0\0\n\0\0\0\x02\0\0\0\0@\0\0\0\0\0\0\x05\0\0\0\0@zD\x05\0\0\0\0\0\0\0\0\0pr\x01\0\0\0\0\0\0\0\0\x01d\0\0\0\0\0\0\x04\0\0\x01\0\0\0\x02\0")
                }
            )
        ]
    }

    struct RedoHarness {
        // underscored because unused, except for removal at drop
        _repo_dir: camino_tempfile::Utf8TempDir,
        manager: PostgresRedoManager,
        tenant_shard_id: TenantShardId,
    }

    impl RedoHarness {
        fn new() -> anyhow::Result<Self> {
            crate::tenant::harness::setup_logging();

            let repo_dir = camino_tempfile::tempdir()?;
            let conf = PageServerConf::dummy_conf(repo_dir.path().to_path_buf());
            let conf = Box::leak(Box::new(conf));
            let tenant_shard_id = TenantShardId::unsharded(TenantId::generate());

            let manager = PostgresRedoManager::new(conf, tenant_shard_id);

            Ok(RedoHarness {
                _repo_dir: repo_dir,
                manager,
                tenant_shard_id,
            })
        }

        fn span(&self) -> tracing::Span {
            tracing::info_span!("RedoHarness", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug())
        }
    }
}