//!
//! WAL redo. This service runs PostgreSQL in a special wal_redo mode
//! to apply given WAL records over an old page image and return the new
//! page image.
//!
//! We rely on Postgres to perform WAL redo for us. We launch a
//! postgres process in a special "wal redo" mode that's similar to
//! single-user mode. We then pass the previous page image, if any,
//! and all the WAL records we want to apply, to the postgres
//! process. Then we get the page image back. Communication with the
//! postgres process happens via stdin/stdout.
//!
//! See pgxn/neon_walredo/walredoproc.c for the other side of
//! this communication.
//!
//! The Postgres process is assumed to be secure against malicious WAL
//! records. It achieves this by dropping privileges before replaying
//! any WAL records, so that even if an attacker hijacks the Postgres
//! process, they cannot escape from it.
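//!
//! # Example (illustrative)
//!
//! A minimal sketch of how a caller drives this module. `conf`, `key`, `lsn`,
//! `base_lsn`, `base_img`, and `records` are assumed to exist at the call
//! site; they are not defined here:
//!
//! ```ignore
//! let manager = PostgresRedoManager::new(conf, tenant_shard_id);
//! let page: Bytes = manager
//!     .request_redo(key, lsn, Some((base_lsn, base_img)), records, 16 /* pg_version */)
//!     .await?;
//! ```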
/// Process lifecycle and abstraction for the IPC protocol.
mod process;

/// Code to apply [`NeonWalRecord`]s.
pub(crate) mod apply_neon;

use crate::config::PageServerConf;
use crate::metrics::{
    WAL_REDO_BYTES_HISTOGRAM, WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM,
    WAL_REDO_RECORDS_HISTOGRAM, WAL_REDO_TIME,
};
use crate::repository::Key;
use crate::walrecord::NeonWalRecord;
use anyhow::Context;
use bytes::{Bytes, BytesMut};
use pageserver_api::models::{WalRedoManagerProcessStatus, WalRedoManagerStatus};
use pageserver_api::shard::TenantShardId;
use std::sync::Arc;
use std::time::Duration;
use std::time::Instant;
use tracing::*;
use utils::lsn::Lsn;
use utils::sync::gate::GateError;
use utils::sync::heavier_once_cell;

///
/// This is the real implementation that uses a Postgres process to
/// perform WAL replay. Only one thread can use the process at a time,
/// which is controlled by the Mutex. In the future, we might want to
/// launch a pool of processes to allow concurrent replay of multiple
/// records.
///
pub struct PostgresRedoManager {
    tenant_shard_id: TenantShardId,
    conf: &'static PageServerConf,
    last_redo_at: std::sync::Mutex<Option<Instant>>,
    /// We use [`heavier_once_cell`] for
    ///
    /// 1. coalescing the lazy spawning of walredo processes ([`ProcessOnceCell::Spawned`])
    /// 2. preventing new processes from being spawned on [`Self::shutdown`] (=> [`ProcessOnceCell::ManagerShutDown`]).
    ///
    /// # Spawning
    ///
    /// Redo requests use the once cell to coalesce onto one call to [`process::WalRedoProcess::launch`].
    ///
    /// Notably, requests don't use the [`heavier_once_cell::Guard`] to keep hold of
    /// their process object; we use [`Arc::clone`] for that.
    ///
    /// This is primarily because earlier implementations that didn't use [`heavier_once_cell`]
    /// had that behavior; it's probably unnecessary.
    /// The only merit of it is that if one walredo process encounters an error,
    /// it can take it out of rotation (= using [`heavier_once_cell::Guard::take_and_deinit`])
    /// and retry redo, thereby starting a new process, while other redo tasks might
    /// still be using the old redo process. But those other tasks will most likely
    /// encounter an error as well, and errors are an unexpected condition anyway.
    /// So, we could probably get rid of the `Arc` in the future.
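    ///
    /// For illustration, a condensed sketch of the coalescing dance described
    /// above; the real code lives in [`Self::apply_batch_postgres`], and
    /// `launch()` stands in for the actual process-launching logic:
    ///
    /// ```ignore
    /// let proc: Arc<Process> = match self.redo_process.get_or_init_detached().await {
    ///     Ok(guard) => match &*guard {
    ///         ProcessOnceCell::Spawned(proc) => Arc::clone(proc),
    ///         ProcessOnceCell::ManagerShutDown => return Err(Error::Cancelled),
    ///     },
    ///     // we won the right to spawn; concurrent requests coalesce onto the result
    ///     Err(permit) => {
    ///         let proc = Arc::new(launch()?);
    ///         self.redo_process
    ///             .set(ProcessOnceCell::Spawned(Arc::clone(&proc)), permit);
    ///         proc
    ///     }
    /// };
    /// ```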
    ///
    /// # Shutdown
    ///
    /// See [`Self::launched_processes`].
    redo_process: heavier_once_cell::OnceCell<ProcessOnceCell>,

    /// Gate that is entered when launching a walredo process and held open
    /// until the process has been `kill()`ed and `wait()`ed upon.
    ///
    /// Manager shutdown waits for this gate to close after setting the
    /// [`ProcessOnceCell::ManagerShutDown`] state in [`Self::redo_process`].
    ///
    /// This type of usage is a bit unusual because gates usually keep track of
    /// concurrent operations, e.g., every [`Self::request_redo`] that is inflight.
    /// But we use it here to keep track of the _processes_ that we have launched,
    /// which may outlive any individual redo request because
    /// - we keep the walredo process around until it's quiesced to amortize spawn cost, and
    /// - the Arc may be held by multiple concurrent redo requests, so, just because
    ///   you replace the [`Self::redo_process`] cell's content doesn't mean the
    ///   process gets killed immediately.
    ///
    /// We could simplify this by getting rid of the [`Arc`].
    /// See the comment on [`Self::redo_process`] for more details.
    launched_processes: utils::sync::gate::Gate,
}

/// See [`PostgresRedoManager::redo_process`].
enum ProcessOnceCell {
    Spawned(Arc<Process>),
    ManagerShutDown,
}

struct Process {
    _launched_processes_guard: utils::sync::gate::GateGuard,
    process: process::WalRedoProcess,
}

impl std::ops::Deref for Process {
    type Target = process::WalRedoProcess;

    fn deref(&self) -> &Self::Target {
        &self.process
    }
}

#[derive(Debug, thiserror::Error)]
pub enum Error {
    #[error("cancelled")]
    Cancelled,
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

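/// Shorthand used throughout this module: wraps the given format arguments in
/// [`anyhow::anyhow!`] and returns them as [`Error::Other`] from the enclosing
/// function.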
macro_rules! bail {
    ($($arg:tt)*) => {
        return Err($crate::walredo::Error::Other(::anyhow::anyhow!($($arg)*)));
    }
}

///
/// Public interface of WAL redo manager
///
impl PostgresRedoManager {
    ///
    /// Request the WAL redo manager to apply some WAL records
    ///
    /// The WAL redo is handled by a separate process, so this just sends a request
    /// to the process and waits for the response.
    ///
    /// # Cancel-Safety
    ///
    /// This method is cancellation-safe.
    pub async fn request_redo(
        &self,
        key: Key,
        lsn: Lsn,
        base_img: Option<(Lsn, Bytes)>,
        records: Vec<(Lsn, NeonWalRecord)>,
        pg_version: u32,
    ) -> Result<Bytes, Error> {
        if records.is_empty() {
            bail!("invalid WAL redo request with no records");
        }

        // Split the records into runs of consecutive records that can be applied
        // in Neon vs. those that need the Postgres process, and apply each run
        // as its own batch.
        let base_img_lsn = base_img.as_ref().map(|p| p.0).unwrap_or(Lsn::INVALID);
        let mut img = base_img.map(|p| p.1);
        let mut batch_neon = apply_neon::can_apply_in_neon(&records[0].1);
        let mut batch_start = 0;
        for (i, record) in records.iter().enumerate().skip(1) {
            let rec_neon = apply_neon::can_apply_in_neon(&record.1);

            if rec_neon != batch_neon {
                let result = if batch_neon {
                    self.apply_batch_neon(key, lsn, img, &records[batch_start..i])
                } else {
                    self.apply_batch_postgres(
                        key,
                        lsn,
                        img,
                        base_img_lsn,
                        &records[batch_start..i],
                        self.conf.wal_redo_timeout,
                        pg_version,
                    )
                    .await
                };
                img = Some(result?);

                batch_neon = rec_neon;
                batch_start = i;
            }
        }
        // last batch
        if batch_neon {
            self.apply_batch_neon(key, lsn, img, &records[batch_start..])
        } else {
            self.apply_batch_postgres(
                key,
                lsn,
                img,
                base_img_lsn,
                &records[batch_start..],
                self.conf.wal_redo_timeout,
                pg_version,
            )
            .await
        }
    }

    pub fn status(&self) -> WalRedoManagerStatus {
        WalRedoManagerStatus {
            last_redo_at: {
                let at = *self.last_redo_at.lock().unwrap();
                at.and_then(|at| {
                    let age = at.elapsed();
                    // map any chrono errors silently to None here
                    chrono::Utc::now().checked_sub_signed(chrono::Duration::from_std(age).ok()?)
                })
            },
            process: self.redo_process.get().and_then(|p| match &*p {
                ProcessOnceCell::Spawned(p) => Some(WalRedoManagerProcessStatus { pid: p.id() }),
                ProcessOnceCell::ManagerShutDown => None,
            }),
        }
    }
}

impl PostgresRedoManager {
    ///
    /// Create a new PostgresRedoManager.
    ///
    pub fn new(
        conf: &'static PageServerConf,
        tenant_shard_id: TenantShardId,
    ) -> PostgresRedoManager {
        // The actual process is launched lazily, on first request.
        PostgresRedoManager {
            tenant_shard_id,
            conf,
            last_redo_at: std::sync::Mutex::default(),
            redo_process: heavier_once_cell::OnceCell::default(),
            launched_processes: utils::sync::gate::Gate::default(),
        }
    }

    /// Shut down the WAL redo manager.
    ///
    /// Returns `true` if this call was the one that initiated shutdown.
    /// Note that if the first caller stops polling before completion, no caller
    /// may ever observe `true`.
    ///
    /// After this future completes
    /// - no redo process is running
    /// - no new redo process will be spawned
    /// - redo requests that need the walredo process will fail with [`Error::Cancelled`]
    /// - [`apply_neon`]-only redo requests may still work, but this may change in the future
    ///
    /// # Cancel-Safety
    ///
    /// This method is cancellation-safe.
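    ///
    /// An illustrative call site (hypothetical; actual callers live elsewhere
    /// in the pageserver):
    ///
    /// ```ignore
    /// if manager.shutdown().await {
    ///     tracing::info!("this call initiated walredo shutdown");
    /// }
    /// ```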
    pub async fn shutdown(&self) -> bool {
        // prevent new processes from being spawned
        let maybe_permit = match self.redo_process.get_or_init_detached().await {
            Ok(guard) => {
                if matches!(&*guard, ProcessOnceCell::ManagerShutDown) {
                    None
                } else {
                    let (proc, permit) = guard.take_and_deinit();
                    drop(proc); // this just drops the Arc, its refcount may not be zero yet
                    Some(permit)
                }
            }
            Err(permit) => Some(permit),
        };
        let it_was_us = if let Some(permit) = maybe_permit {
            self.redo_process
                .set(ProcessOnceCell::ManagerShutDown, permit);
            true
        } else {
            false
        };
        // Wait for ongoing requests to drain and for the refcounts of all
        // Arc<WalRedoProcess> that we ever launched to drop to zero; when that
        // happens, the underlying process is synchronously kill()ed and wait()ed for.
        self.launched_processes.close().await;
        it_was_us
    }

    /// This type doesn't have its own background task to check for idleness: we
    /// rely on our owner calling this function periodically in its own housekeeping
    /// loops, e.g. as sketched below.
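    ///
    /// A minimal sketch of such a loop, assuming a tokio context and an
    /// arbitrary ten-second tick (both illustrative, not this crate's actual
    /// housekeeping code):
    ///
    /// ```ignore
    /// loop {
    ///     tokio::time::sleep(std::time::Duration::from_secs(10)).await;
    ///     manager.maybe_quiesce(idle_timeout);
    /// }
    /// ```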
    pub(crate) fn maybe_quiesce(&self, idle_timeout: Duration) {
        if let Ok(g) = self.last_redo_at.try_lock() {
            if let Some(last_redo_at) = *g {
                if last_redo_at.elapsed() >= idle_timeout {
                    drop(g);
                    drop(self.redo_process.get().map(|guard| guard.take_and_deinit()));
                }
            }
        }
    }

    ///
    /// Process one request for WAL redo using wal-redo postgres
    ///
    /// # Cancel-Safety
    ///
    /// Cancellation safe.
    #[allow(clippy::too_many_arguments)]
    async fn apply_batch_postgres(
        &self,
        key: Key,
        lsn: Lsn,
        base_img: Option<Bytes>,
        base_img_lsn: Lsn,
        records: &[(Lsn, NeonWalRecord)],
        wal_redo_timeout: Duration,
        pg_version: u32,
    ) -> Result<Bytes, Error> {
        *(self.last_redo_at.lock().unwrap()) = Some(Instant::now());

        let (rel, blknum) = key.to_rel_block().context("invalid record")?;
        const MAX_RETRY_ATTEMPTS: u32 = 1;
        let mut n_attempts = 0u32;
        loop {
            // Get the current process, or lazily launch one if none is running.
            let proc: Arc<Process> = match self.redo_process.get_or_init_detached().await {
                Ok(guard) => match &*guard {
                    ProcessOnceCell::Spawned(proc) => Arc::clone(proc),
                    ProcessOnceCell::ManagerShutDown => {
                        return Err(Error::Cancelled);
                    }
                },
                Err(permit) => {
                    let start = Instant::now();
                    let proc = Arc::new(Process {
                        _launched_processes_guard: match self.launched_processes.enter() {
                            Ok(guard) => guard,
                            Err(GateError::GateClosed) => unreachable!(
                                "shutdown sets the once cell to `ManagerShutDown` state before closing the gate"
                            ),
                        },
                        process: process::WalRedoProcess::launch(
                            self.conf,
                            self.tenant_shard_id,
                            pg_version,
                        )
                        .context("launch walredo process")?,
                    });
                    let duration = start.elapsed();
                    WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM.observe(duration.as_secs_f64());
                    info!(
                        duration_ms = duration.as_millis(),
                        pid = proc.id(),
                        "launched walredo process"
                    );
                    self.redo_process
                        .set(ProcessOnceCell::Spawned(Arc::clone(&proc)), permit);
                    proc
                }
            };

            let started_at = std::time::Instant::now();

            // Relational WAL records are applied using wal-redo-postgres
            let result = proc
                .apply_wal_records(rel, blknum, &base_img, records, wal_redo_timeout)
                .await
                .context("apply_wal_records");

            let duration = started_at.elapsed();

            let len = records.len();
            let nbytes = records.iter().fold(0, |accumulator, record| {
                accumulator
                    + match &record.1 {
                        NeonWalRecord::Postgres { rec, .. } => rec.len(),
                        _ => unreachable!("Only PostgreSQL records are accepted in this batch"),
                    }
            });

            WAL_REDO_TIME.observe(duration.as_secs_f64());
            WAL_REDO_RECORDS_HISTOGRAM.observe(len as f64);
            WAL_REDO_BYTES_HISTOGRAM.observe(nbytes as f64);

            debug!(
                "postgres applied {} WAL records ({} bytes) in {} us to reconstruct page image at LSN {}",
                len,
                nbytes,
                duration.as_micros(),
                lsn
            );

            // If something went wrong, don't try to reuse the process. Kill it, and
            // the next request will launch a new one.
            if let Err(e) = result.as_ref() {
                error!(
                    "error applying {} WAL records {}..{} ({} bytes) to key {key}, from base image with LSN {} to reconstruct page image at LSN {} n_attempts={}: {:?}",
                    records.len(),
                    records.first().map(|p| p.0).unwrap_or(Lsn(0)),
                    records.last().map(|p| p.0).unwrap_or(Lsn(0)),
                    nbytes,
                    base_img_lsn,
                    lsn,
                    n_attempts,
                    e,
                );
                // Avoid concurrent callers hitting the same issue by taking `proc` out of the rotation.
                // Note that there may be other tasks concurrent with us that also hold `proc`.
                // We have to deal with that here.
                // Also read the doc comment on field `self.redo_process`.
                //
                // NB: there may still be other concurrent threads using `proc`.
                // The last one will send SIGKILL when the underlying Arc reaches refcount 0.
                //
                // NB: the drop impl blocks the dropping thread with a wait() system call for
                // the child process. In some ways the blocking is actually good: if we
                // deferred the waiting into the background / to tokio by using `tokio::process`,
                // it could happen that if walredo always fails immediately, we spawn processes faster
                // than we can SIGKILL & `wait` for them to exit. By doing it the way we do here,
                // we limit this risk of runaway to at most $num_runtimes * $num_executor_threads.
                // This probably needs revisiting at some later point.
                match self.redo_process.get() {
                    None => (),
                    Some(guard) => {
                        match &*guard {
                            ProcessOnceCell::ManagerShutDown => {}
                            ProcessOnceCell::Spawned(guard_proc) => {
                                if Arc::ptr_eq(&proc, guard_proc) {
                                    // We're the first to observe an error from `proc`, it's our job to take it out of rotation.
                                    guard.take_and_deinit();
                                } else {
                                    // Another task already spawned another redo process (further up in this method)
                                    // and put it into `redo_process`. Do nothing, our view of the world is behind.
                                }
                            }
                        }
                    }
                }
                // The last task that does this `drop()` of `proc` will do a blocking `wait()` syscall.
                drop(proc);
            } else if n_attempts != 0 {
                info!(n_attempts, "retried walredo succeeded");
            }
            n_attempts += 1;
            if n_attempts > MAX_RETRY_ATTEMPTS || result.is_ok() {
                return result.map_err(Error::Other);
            }
        }
    }

    ///
    /// Process a batch of WAL records using bespoke Neon code.
    ///
    fn apply_batch_neon(
        &self,
        key: Key,
        lsn: Lsn,
        base_img: Option<Bytes>,
        records: &[(Lsn, NeonWalRecord)],
    ) -> Result<Bytes, Error> {
        let start_time = Instant::now();

        let mut page = BytesMut::new();
        if let Some(fpi) = base_img {
            // If a full-page image is provided, then use it...
            page.extend_from_slice(&fpi[..]);
        } else {
            // All the current WAL record types that we can handle require a base image.
            bail!("invalid neon WAL redo request with no base image");
        }

        // Apply all the WAL records in the batch
        for (record_lsn, record) in records.iter() {
            self.apply_record_neon(key, &mut page, *record_lsn, record)?;
        }
        // Success!
        let duration = start_time.elapsed();
        // FIXME: using the same metric here creates a bimodal distribution by default, and because
        // there could be multiple batch sizes this would be N+1 modal.
        WAL_REDO_TIME.observe(duration.as_secs_f64());

        debug!(
            "neon applied {} WAL records in {} us to reconstruct page image at LSN {}",
            records.len(),
            duration.as_micros(),
            lsn
        );

        Ok(page.freeze())
    }

    fn apply_record_neon(
        &self,
        key: Key,
        page: &mut BytesMut,
        record_lsn: Lsn,
        record: &NeonWalRecord,
    ) -> anyhow::Result<()> {
        apply_neon::apply_in_neon(record, record_lsn, key, page)?;

        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::PostgresRedoManager;
    use crate::repository::Key;
    use crate::{config::PageServerConf, walrecord::NeonWalRecord};
    use bytes::Bytes;
    use pageserver_api::shard::TenantShardId;
    use std::str::FromStr;
    use tracing::Instrument;
    use utils::{id::TenantId, lsn::Lsn};

    #[tokio::test]
    async fn short_v14_redo() {
        let expected = std::fs::read("test_data/short_v14_redo.page").unwrap();

        let h = RedoHarness::new().unwrap();

        let page = h
            .manager
            .request_redo(
                Key {
                    field1: 0,
                    field2: 1663,
                    field3: 13010,
                    field4: 1259,
                    field5: 0,
                    field6: 0,
                },
                Lsn::from_str("0/16E2408").unwrap(),
                None,
                short_records(),
                14,
            )
            .instrument(h.span())
            .await
            .unwrap();

        assert_eq!(&expected, &*page);
    }

    #[tokio::test]
    async fn short_v14_fails_for_wrong_key_but_returns_zero_page() {
        let h = RedoHarness::new().unwrap();

        let page = h
            .manager
            .request_redo(
                Key {
                    field1: 0,
                    field2: 1663,
                    // key should be 13010
                    field3: 13130,
                    field4: 1259,
                    field5: 0,
                    field6: 0,
                },
                Lsn::from_str("0/16E2408").unwrap(),
                None,
                short_records(),
                14,
            )
            .instrument(h.span())
            .await
            .unwrap();

        // TODO: there will be some stderr printout, which is forwarded to tracing, and
        // could perhaps be captured as long as it's in the same thread.
        assert_eq!(page, crate::ZERO_PAGE);
    }

    #[tokio::test]
    async fn test_stderr() {
        let h = RedoHarness::new().unwrap();
        h.manager
            .request_redo(
                Key::from_i128(0),
                Lsn::INVALID,
                None,
                short_records(),
                16, /* 16 currently produces stderr output on startup, which adds a nice extra edge */
            )
            .instrument(h.span())
            .await
            .unwrap_err();
    }

    #[allow(clippy::octal_escapes)]
    fn short_records() -> Vec<(Lsn, NeonWalRecord)> {
        vec![
            (
                Lsn::from_str("0/16A9388").unwrap(),
                NeonWalRecord::Postgres {
                    will_init: true,
                    rec: Bytes::from_static(b"j\x03\0\0\0\x04\0\0\xe8\x7fj\x01\0\0\0\0\0\n\0\0\xd0\x16\x13Y\0\x10\0\04\x03\xd4\0\x05\x7f\x06\0\0\xd22\0\0\xeb\x04\0\0\0\0\0\0\xff\x03\0\0\0\0\x80\xeca\x01\0\0\x01\0\xd4\0\xa0\x1d\0 \x04 \0\0\0\0/\0\x01\0\xa0\x9dX\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0.\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\00\x9f\x9a\x01P\x9e\xb2\x01\0\x04\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x02\0!\0\x01\x08 \xff\xff\xff?\0\0\0\0\0\0@\0\0another_table\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x98\x08\0\0\x02@\0\0\0\0\0\0\n\0\0\0\x02\0\0\0\0@\0\0\0\0\0\0\0\0\0\0\0\0\x80\xbf\0\0\0\0\0\0\0\0\0\0pr\x01\0\0\0\0\0\0\0\0\x01d\0\0\0\0\0\0\x04\0\0\x01\0\0\0\0\0\0\0\x0c\x02\0\0\0\0\0\0\0\0\0\0\0\0\0\0/\0!\x80\x03+ \xff\xff\xff\x7f\0\0\0\0\0\xdf\x04\0\0pg_type\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x0b\0\0\0G\0\0\0\0\0\0\0\n\0\0\0\x02\0\0\0\0\0\0\0\0\0\0\0\x0e\0\0\0\0@\x16D\x0e\0\0\0K\x10\0\0\x01\0pr \0\0\0\0\0\0\0\0\x01n\0\0\0\0\0\xd6\x02\0\0\x01\0\0\0[\x01\0\0\0\0\0\0\0\t\x04\0\0\x02\0\0\0\x01\0\0\0\n\0\0\0\n\0\0\0\x7f\0\0\0\0\0\0\0\n\0\0\0\x02\0\0\0\0\0\0C\x01\0\0\x15\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0.\0!\x80\x03+ \xff\xff\xff\x7f\0\0\0\0\0;\n\0\0pg_statistic\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x0b\0\0\0\xfd.\0\0\0\0\0\0\n\0\0\0\x02\0\0\0;\n\0\0\0\0\0\0\x13\0\0\0\0\0\xcbC\x13\0\0\0\x18\x0b\0\0\x01\0pr\x1f\0\0\0\0\0\0\0\0\x01n\0\0\0\0\0\xd6\x02\0\0\x01\0\0\0C\x01\0\0\0\0\0\0\0\t\x04\0\0\x01\0\0\0\x01\0\0\0\n\0\0\0\n\0\0\0\x7f\0\0\0\0\0\0\x02\0\x01")
                }
            ),
            (
                Lsn::from_str("0/16D4080").unwrap(),
                NeonWalRecord::Postgres {
                    will_init: false,
                    rec: Bytes::from_static(b"\xbc\0\0\0\0\0\0\0h?m\x01\0\0\0\0p\n\0\09\x08\xa3\xea\0 \x8c\0\x7f\x06\0\0\xd22\0\0\xeb\x04\0\0\0\0\0\0\xff\x02\0@\0\0another_table\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x98\x08\0\0\x02@\0\0\0\0\0\0\n\0\0\0\x02\0\0\0\0@\0\0\0\0\0\0\x05\0\0\0\0@zD\x05\0\0\0\0\0\0\0\0\0pr\x01\0\0\0\0\0\0\0\0\x01d\0\0\0\0\0\0\x04\0\0\x01\0\0\0\x02\0")
                }
            )
        ]
    }

    struct RedoHarness {
        // underscored because unused, except for removal at drop
        _repo_dir: camino_tempfile::Utf8TempDir,
        manager: PostgresRedoManager,
        tenant_shard_id: TenantShardId,
    }

    impl RedoHarness {
        fn new() -> anyhow::Result<Self> {
            crate::tenant::harness::setup_logging();

            let repo_dir = camino_tempfile::tempdir()?;
            let conf = PageServerConf::dummy_conf(repo_dir.path().to_path_buf());
            let conf = Box::leak(Box::new(conf));
            let tenant_shard_id = TenantShardId::unsharded(TenantId::generate());

            let manager = PostgresRedoManager::new(conf, tenant_shard_id);

            Ok(RedoHarness {
                _repo_dir: repo_dir,
                manager,
                tenant_shard_id,
            })
        }
        fn span(&self) -> tracing::Span {
            tracing::info_span!("RedoHarness", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug())
        }
    }
}