Line data Source code
1 : //!
2 : //! Generate a tarball with files needed to bootstrap ComputeNode.
3 : //!
4 : //! TODO: this module has nothing to do with PostgreSQL pg_basebackup.
5 : //! It could use a better name.
6 : //!
7 : //! A stateless Postgres compute node is launched by sending it a tarball
8 : //! that contains non-relational data (multixacts, CLOG, filenode maps, two-phase state files),
9 : //! a generated pg_control file and a dummy WAL segment.
10 : //! This module is responsible for creating such a tarball
11 : //! from data stored in object storage.
12 : //!
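 : //!
 : //! Roughly, the resulting tarball contains (illustrative, not an exhaustive list):
 : //! the pgdata directory skeleton, config placeholders such as pg_hba.conf, SLRU
 : //! segments (pg_xact, pg_multixact), per-database PG_VERSION and pg_filenode.map
 : //! files, pg_twophase state files, selected aux files (e.g. replication slot state),
 : //! zenith.signal, global/pg_control and one WAL segment.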
13 : use anyhow::{anyhow, Context};
14 : use bytes::{BufMut, Bytes, BytesMut};
15 : use fail::fail_point;
16 : use pageserver_api::key::Key;
17 : use postgres_ffi::pg_constants;
18 : use std::fmt::Write as FmtWrite;
19 : use std::time::{Instant, SystemTime};
20 : use tokio::io;
21 : use tokio::io::AsyncWrite;
22 : use tracing::*;
23 :
24 : use tokio_tar::{Builder, EntryType, Header};
25 :
26 : use crate::context::RequestContext;
27 : use crate::pgdatadir_mapping::Version;
28 : use crate::tenant::Timeline;
29 : use pageserver_api::reltag::{RelTag, SlruKind};
30 :
31 : use postgres_ffi::dispatch_pgversion;
32 : use postgres_ffi::pg_constants::{DEFAULTTABLESPACE_OID, GLOBALTABLESPACE_OID};
33 : use postgres_ffi::pg_constants::{PGDATA_SPECIAL_FILES, PG_HBA};
34 : use postgres_ffi::relfile_utils::{INIT_FORKNUM, MAIN_FORKNUM};
35 : use postgres_ffi::XLogFileName;
36 : use postgres_ffi::PG_TLI;
37 : use postgres_ffi::{BLCKSZ, RELSEG_SIZE, WAL_SEGMENT_SIZE};
38 : use utils::lsn::Lsn;
39 :
40 0 : #[derive(Debug, thiserror::Error)]
41 : pub enum BasebackupError {
42 : #[error("basebackup pageserver error {0:#}")]
43 : Server(#[from] anyhow::Error),
44 : #[error("basebackup client error {0:#}")]
45 : Client(#[source] io::Error),
46 : }
47 :
48 : /// Create a basebackup with the non-relational data in it.
49 : /// Relational data is only included if 'full_backup' is true.
50 : ///
51 : /// Currently we pass an empty 'req_lsn' in two cases:
52 : /// * During the basebackup right after timeline creation
53 : /// * When working without safekeepers. In this situation it is important that the LSN
54 : /// we take the basebackup at matches the LSN that is used in the pageserver's walreceiver
55 : /// to start the replication.
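 : ///
 : /// A minimal calling sketch (hypothetical caller-side values; not a doctest):
 : /// ```ignore
 : /// send_basebackup_tarball(&mut copyout_writer, &timeline, Some(request_lsn), None, false, false, &ctx).await?;
 : /// ```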
56 0 : pub async fn send_basebackup_tarball<'a, W>(
57 0 : write: &'a mut W,
58 0 : timeline: &'a Timeline,
59 0 : req_lsn: Option<Lsn>,
60 0 : prev_lsn: Option<Lsn>,
61 0 : full_backup: bool,
62 0 : replica: bool,
63 0 : ctx: &'a RequestContext,
64 0 : ) -> Result<(), BasebackupError>
65 0 : where
66 0 : W: AsyncWrite + Send + Sync + Unpin,
67 0 : {
68 : // Compute postgres doesn't have any previous WAL files, but the first
69 : // record that it's going to write needs to include the LSN of the
70 : // previous record (xl_prev). We include prev_record_lsn in the
71 : // "zenith.signal" file, so that postgres can read it during startup.
72 : //
73 : // However, we don't keep the full history of record boundaries in the
74 : // pageserver, only the predecessor of the latest record on each
75 : // timeline. So we can only provide prev_record_lsn when the base
76 : // backup is taken at the end of the timeline, i.e. at last_record_lsn.
77 : // Even at the end of the timeline, we sometimes don't have a valid
78 : // prev_lsn value; that happens if the timeline was just branched from
79 : // an old LSN and it doesn't have any WAL of its own yet. We will set
80 : // prev_lsn to Lsn(0) if we cannot provide the correct value.
81 0 : let (backup_prev, backup_lsn) = if let Some(req_lsn) = req_lsn {
82 : // Backup was requested at a particular LSN. The caller should've
83 : // already checked that it's a valid LSN.
84 :
85 : // If the requested point is the end of the timeline, we can
86 : // provide prev_lsn. (get_last_record_rlsn() might return it as
87 : // zero, though, if no WAL has been generated on this timeline
88 : // yet.)
89 0 : let end_of_timeline = timeline.get_last_record_rlsn();
90 0 : if req_lsn == end_of_timeline.last {
91 0 : (end_of_timeline.prev, req_lsn)
92 : } else {
93 0 : (Lsn(0), req_lsn)
94 : }
95 : } else {
96 : // Backup was requested at the end of the timeline.
97 0 : let end_of_timeline = timeline.get_last_record_rlsn();
98 0 : (end_of_timeline.prev, end_of_timeline.last)
99 : };
100 :
101 : // Consolidate the derived and the provided prev_lsn values
102 0 : let prev_lsn = if let Some(provided_prev_lsn) = prev_lsn {
103 0 : if backup_prev != Lsn(0) && backup_prev != provided_prev_lsn {
104 0 : return Err(BasebackupError::Server(anyhow!(
105 0 : "backup_prev {backup_prev} != provided_prev_lsn {provided_prev_lsn}"
106 0 : )));
107 0 : }
108 0 : provided_prev_lsn
109 : } else {
110 0 : backup_prev
111 : };
112 :
113 0 : info!(
114 0 : "taking basebackup lsn={}, prev_lsn={} (full_backup={}, replica={})",
115 : backup_lsn, prev_lsn, full_backup, replica
116 : );
117 :
118 0 : let basebackup = Basebackup {
119 0 : ar: Builder::new_non_terminated(write),
120 0 : timeline,
121 0 : lsn: backup_lsn,
122 0 : prev_record_lsn: prev_lsn,
123 0 : full_backup,
124 0 : replica,
125 0 : ctx,
126 0 : };
127 0 : basebackup
128 0 : .send_tarball()
129 0 : .instrument(info_span!("send_tarball", backup_lsn=%backup_lsn))
130 0 : .await
131 0 : }
132 :
133 : /// This is a short-lived object that only exists for the duration of tarball creation,
134 : /// mostly to avoid passing a lot of parameters between the various functions
135 : /// used for constructing the tarball.
136 : struct Basebackup<'a, W>
137 : where
138 : W: AsyncWrite + Send + Sync + Unpin,
139 : {
140 : ar: Builder<&'a mut W>,
141 : timeline: &'a Timeline,
142 : lsn: Lsn,
143 : prev_record_lsn: Lsn,
144 : full_backup: bool,
145 : replica: bool,
146 : ctx: &'a RequestContext,
147 : }
148 :
149 : /// A sink that accepts SLRU blocks ordered by key and forwards
150 : /// full segments to the archive.
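 : ///
 : /// Blocks must be fed in key order: a change in the (SlruKind, segno) pair is what
 : /// tells the builder that the previous segment is complete and can be flushed.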
151 : struct SlruSegmentsBuilder<'a, 'b, W>
152 : where
153 : W: AsyncWrite + Send + Sync + Unpin,
154 : {
155 : ar: &'a mut Builder<&'b mut W>,
156 : buf: Vec<u8>,
157 : current_segment: Option<(SlruKind, u32)>,
158 : total_blocks: usize,
159 : }
160 :
161 : impl<'a, 'b, W> SlruSegmentsBuilder<'a, 'b, W>
162 : where
163 : W: AsyncWrite + Send + Sync + Unpin,
164 : {
165 0 : fn new(ar: &'a mut Builder<&'b mut W>) -> Self {
166 0 : Self {
167 0 : ar,
168 0 : buf: Vec::new(),
169 0 : current_segment: None,
170 0 : total_blocks: 0,
171 0 : }
172 0 : }
173 :
174 0 : async fn add_block(&mut self, key: &Key, block: Bytes) -> Result<(), BasebackupError> {
175 0 : let (kind, segno, _) = key.to_slru_block()?;
176 :
177 0 : match kind {
178 : SlruKind::Clog => {
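 : // CLOG block images may carry 8 extra trailing bytes; only the first
 : // BLCKSZ bytes are written into the segment (see the slice below).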
179 0 : if !(block.len() == BLCKSZ as usize || block.len() == BLCKSZ as usize + 8) {
180 0 : return Err(BasebackupError::Server(anyhow!(
181 0 : "invalid SlruKind::Clog record: block.len()={}",
182 0 : block.len()
183 0 : )));
184 0 : }
185 : }
186 : SlruKind::MultiXactMembers | SlruKind::MultiXactOffsets => {
187 0 : if block.len() != BLCKSZ as usize {
188 0 : return Err(BasebackupError::Server(anyhow!(
189 0 : "invalid {:?} record: block.len()={}",
190 0 : kind,
191 0 : block.len()
192 0 : )));
193 0 : }
194 : }
195 : }
196 :
197 0 : let segment = (kind, segno);
198 0 : match self.current_segment {
199 0 : None => {
200 0 : self.current_segment = Some(segment);
201 0 : self.buf
202 0 : .extend_from_slice(block.slice(..BLCKSZ as usize).as_ref());
203 0 : }
204 0 : Some(current_seg) if current_seg == segment => {
205 0 : self.buf
206 0 : .extend_from_slice(block.slice(..BLCKSZ as usize).as_ref());
207 0 : }
208 : Some(_) => {
209 0 : self.flush().await?;
210 :
211 0 : self.current_segment = Some(segment);
212 0 : self.buf
213 0 : .extend_from_slice(block.slice(..BLCKSZ as usize).as_ref());
214 : }
215 : }
216 :
217 0 : Ok(())
218 0 : }
219 :
220 0 : async fn flush(&mut self) -> Result<(), BasebackupError> {
221 0 : let nblocks = self.buf.len() / BLCKSZ as usize;
222 0 : let (kind, segno) = self.current_segment.take().unwrap();
223 0 : let segname = format!("{}/{:>04X}", kind.to_str(), segno);
224 0 : let header = new_tar_header(&segname, self.buf.len() as u64)?;
225 0 : self.ar
226 0 : .append(&header, self.buf.as_slice())
227 0 : .await
228 0 : .map_err(BasebackupError::Client)?;
229 :
230 0 : self.total_blocks += nblocks;
231 0 : debug!("Added to basebackup slru {} relsize {}", segname, nblocks);
232 :
233 0 : self.buf.clear();
234 0 :
235 0 : Ok(())
236 0 : }
237 :
238 0 : async fn finish(mut self) -> Result<(), BasebackupError> {
239 0 : let res = if self.current_segment.is_none() || self.buf.is_empty() {
240 0 : Ok(())
241 : } else {
242 0 : self.flush().await
243 : };
244 :
245 0 : info!("Collected {} SLRU blocks", self.total_blocks);
246 :
247 0 : res
248 0 : }
249 : }
250 :
251 : impl<'a, W> Basebackup<'a, W>
252 : where
253 : W: AsyncWrite + Send + Sync + Unpin,
254 : {
255 0 : async fn send_tarball(mut self) -> Result<(), BasebackupError> {
256 : // TODO include checksum
257 :
258 0 : let lazy_slru_download = self.timeline.get_lazy_slru_download() && !self.full_backup;
259 :
260 0 : let pgversion = self.timeline.pg_version;
261 0 : let subdirs = dispatch_pgversion!(pgversion, &pgv::bindings::PGDATA_SUBDIRS[..]);
262 :
263 : // Create pgdata subdirs structure
264 0 : for dir in subdirs.iter() {
265 0 : let header = new_tar_header_dir(dir)?;
266 0 : self.ar
267 0 : .append(&header, &mut io::empty())
268 0 : .await
269 0 : .context("could not add directory to basebackup tarball")?;
270 : }
271 :
272 : // Send config files.
273 0 : for filepath in PGDATA_SPECIAL_FILES.iter() {
274 0 : if *filepath == "pg_hba.conf" {
275 0 : let data = PG_HBA.as_bytes();
276 0 : let header = new_tar_header(filepath, data.len() as u64)?;
277 0 : self.ar
278 0 : .append(&header, data)
279 0 : .await
280 0 : .context("could not add config file to basebackup tarball")?;
281 : } else {
282 0 : let header = new_tar_header(filepath, 0)?;
283 0 : self.ar
284 0 : .append(&header, &mut io::empty())
285 0 : .await
286 0 : .context("could not add config file to basebackup tarball")?;
287 : }
288 : }
289 0 : if !lazy_slru_download {
290 : // Gather non-relational files from object storage pages.
291 0 : let slru_partitions = self
292 0 : .timeline
293 0 : .get_slru_keyspace(Version::Lsn(self.lsn), self.ctx)
294 0 : .await
295 0 : .map_err(|e| BasebackupError::Server(e.into()))?
296 0 : .partition(
297 0 : self.timeline.get_shard_identity(),
298 0 : Timeline::MAX_GET_VECTORED_KEYS * BLCKSZ as u64,
299 0 : );
300 0 :
301 0 : let mut slru_builder = SlruSegmentsBuilder::new(&mut self.ar);
302 :
303 0 : for part in slru_partitions.parts {
304 0 : let blocks = self
305 0 : .timeline
306 0 : .get_vectored(part, self.lsn, self.ctx)
307 0 : .await
308 0 : .map_err(|e| BasebackupError::Server(e.into()))?;
309 :
310 0 : for (key, block) in blocks {
311 0 : let block = block.map_err(|e| BasebackupError::Server(e.into()))?;
312 0 : slru_builder.add_block(&key, block).await?;
313 : }
314 : }
315 0 : slru_builder.finish().await?;
316 0 : }
317 :
318 0 : let mut min_restart_lsn: Lsn = Lsn::MAX;
319 : // Create tablespace directories
320 0 : for ((spcnode, dbnode), has_relmap_file) in self
321 0 : .timeline
322 0 : .list_dbdirs(self.lsn, self.ctx)
323 0 : .await
324 0 : .map_err(|e| BasebackupError::Server(e.into()))?
325 : {
326 0 : self.add_dbdir(spcnode, dbnode, has_relmap_file).await?;
327 :
328 : // If full backup is requested, include all relation files.
329 : // Otherwise only include init forks of unlogged relations.
330 0 : let rels = self
331 0 : .timeline
332 0 : .list_rels(spcnode, dbnode, Version::Lsn(self.lsn), self.ctx)
333 0 : .await
334 0 : .map_err(|e| BasebackupError::Server(e.into()))?;
335 0 : for &rel in rels.iter() {
336 : // Send init fork as main fork to provide well formed empty
337 : // contents of UNLOGGED relations. Postgres copies it in
338 : // `reinit.c` during recovery.
339 0 : if rel.forknum == INIT_FORKNUM {
340 : // We probably don't need the _init fork itself, but having it at least
341 : // serves as a marker that the relation is unlogged.
342 0 : self.add_rel(rel, rel).await?;
343 0 : self.add_rel(rel, rel.with_forknum(MAIN_FORKNUM)).await?;
344 0 : continue;
345 0 : }
346 0 :
347 0 : if self.full_backup {
348 0 : if rel.forknum == MAIN_FORKNUM && rels.contains(&rel.with_forknum(INIT_FORKNUM))
349 : {
350 : // skip this, will include it when we reach the init fork
351 0 : continue;
352 0 : }
353 0 : self.add_rel(rel, rel).await?;
354 0 : }
355 : }
356 : }
357 :
358 0 : let start_time = Instant::now();
359 0 : let aux_files = self
360 0 : .timeline
361 0 : .list_aux_files(self.lsn, self.ctx)
362 0 : .await
363 0 : .map_err(|e| BasebackupError::Server(e.into()))?;
364 0 : let aux_scan_time = start_time.elapsed();
365 0 : let aux_estimated_size = aux_files
366 0 : .values()
367 0 : .map(|content| content.len())
368 0 : .sum::<usize>();
369 0 : info!(
370 0 : "Scanned {} aux files in {}ms, aux file content size = {}",
371 0 : aux_files.len(),
372 0 : aux_scan_time.as_millis(),
373 : aux_estimated_size
374 : );
375 :
376 0 : for (path, content) in aux_files {
377 0 : if path.starts_with("pg_replslot") {
378 : // Do not create logical replication slots on a standby: they are not used there, but they would prevent WAL truncation.
379 0 : if self.replica {
380 0 : continue;
381 0 : }
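 : // Read the slot's restart_lsn from its fixed offset in the on-disk slot
 : // state; the minimum over all slots is emitted as "restart.lsn" below.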
382 0 : let offs = pg_constants::REPL_SLOT_ON_DISK_OFFSETOF_RESTART_LSN;
383 0 : let restart_lsn = Lsn(u64::from_le_bytes(
384 0 : content[offs..offs + 8].try_into().unwrap(),
385 0 : ));
386 0 : info!("Replication slot {} restart LSN={}", path, restart_lsn);
387 0 : min_restart_lsn = Lsn::min(min_restart_lsn, restart_lsn);
388 0 : } else if path == "pg_logical/replorigin_checkpoint" {
389 : // replorigin_checkpoint is written only on compute shutdown, so it contains
390 : // stale values. Instead, we generate our own version of this file for the particular LSN,
391 : // based on information about replication origins extracted from transaction commit records.
392 : // In the future we will not generate an AUX record for "pg_logical/replorigin_checkpoint" at all,
393 : // but for now we have to handle (skip) it for backward compatibility.
394 0 : continue;
395 0 : }
396 0 : let header = new_tar_header(&path, content.len() as u64)?;
397 0 : self.ar
398 0 : .append(&header, &*content)
399 0 : .await
400 0 : .context("could not add aux file to basebackup tarball")?;
401 : }
402 :
403 0 : if min_restart_lsn != Lsn::MAX {
404 0 : info!(
405 0 : "Min restart LSN for logical replication is {}",
406 : min_restart_lsn
407 : );
408 0 : let data = min_restart_lsn.0.to_le_bytes();
409 0 : let header = new_tar_header("restart.lsn", data.len() as u64)?;
410 0 : self.ar
411 0 : .append(&header, &data[..])
412 0 : .await
413 0 : .context("could not add restart.lsn file to basebackup tarball")?;
414 0 : }
415 0 : for xid in self
416 0 : .timeline
417 0 : .list_twophase_files(self.lsn, self.ctx)
418 0 : .await
419 0 : .map_err(|e| BasebackupError::Server(e.into()))?
420 : {
421 0 : self.add_twophase_file(xid).await?;
422 : }
423 0 : let repl_origins = self
424 0 : .timeline
425 0 : .get_replorigins(self.lsn, self.ctx)
426 0 : .await
427 0 : .map_err(|e| BasebackupError::Server(e.into()))?;
428 0 : let n_origins = repl_origins.len();
429 0 : if n_origins != 0 {
430 : //
431 : // Construct the "pg_logical/replorigin_checkpoint" file based on information about replication origins
432 : // extracted from transaction commit records. We use this file to pass information about replication
433 : // origins to the compute, so that logical replication can restart from the proper point.
434 : //
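 : // File layout, mirroring what is written below: a u32 magic, then for each origin
 : // a u16 origin id, 6 padding bytes and a u64 LSN, followed by a CRC32C over
 : // everything before it.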
435 0 : let mut content = Vec::with_capacity(n_origins * 16 + 8);
436 0 : content.extend_from_slice(&pg_constants::REPLICATION_STATE_MAGIC.to_le_bytes());
437 0 : for (origin_id, origin_lsn) in repl_origins {
438 0 : content.extend_from_slice(&origin_id.to_le_bytes());
439 0 : content.extend_from_slice(&[0u8; 6]); // align to 8 bytes
440 0 : content.extend_from_slice(&origin_lsn.0.to_le_bytes());
441 0 : }
442 0 : let crc32 = crc32c::crc32c(&content);
443 0 : content.extend_from_slice(&crc32.to_le_bytes());
444 0 : let header = new_tar_header("pg_logical/replorigin_checkpoint", content.len() as u64)?;
445 0 : self.ar.append(&header, &*content).await.context(
446 0 : "could not add pg_logical/replorigin_checkpoint file to basebackup tarball",
447 0 : )?;
448 0 : }
449 :
450 0 : fail_point!("basebackup-before-control-file", |_| {
451 0 : Err(BasebackupError::Server(anyhow!(
452 0 : "failpoint basebackup-before-control-file"
453 0 : )))
454 0 : });
455 :
456 : // Generate pg_control and bootstrap WAL segment.
457 0 : self.add_pgcontrol_file().await?;
458 0 : self.ar.finish().await.map_err(BasebackupError::Client)?;
459 0 : debug!("all tarred up!");
460 0 : Ok(())
461 0 : }
462 :
463 : /// Add contents of relfilenode `src`, naming it as `dst`.
464 0 : async fn add_rel(&mut self, src: RelTag, dst: RelTag) -> Result<(), BasebackupError> {
465 0 : let nblocks = self
466 0 : .timeline
467 0 : .get_rel_size(src, Version::Lsn(self.lsn), self.ctx)
468 0 : .await
469 0 : .map_err(|e| BasebackupError::Server(e.into()))?;
470 :
471 : // If the relation is empty, create an empty file
472 0 : if nblocks == 0 {
473 0 : let file_name = dst.to_segfile_name(0);
474 0 : let header = new_tar_header(&file_name, 0)?;
475 0 : self.ar
476 0 : .append(&header, &mut io::empty())
477 0 : .await
478 0 : .map_err(BasebackupError::Client)?;
479 0 : return Ok(());
480 0 : }
481 0 :
482 0 : // Add a file for each chunk of blocks (aka segment)
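 : // Segment files follow the usual Postgres naming convention: the first segment is
 : // the bare relfilenode name and later ones get a numeric suffix (see to_segfile_name).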
483 0 : let mut startblk = 0;
484 0 : let mut seg = 0;
485 0 : while startblk < nblocks {
486 0 : let endblk = std::cmp::min(startblk + RELSEG_SIZE, nblocks);
487 0 :
488 0 : let mut segment_data: Vec<u8> = vec![];
489 0 : for blknum in startblk..endblk {
490 0 : let img = self
491 0 : .timeline
492 0 : .get_rel_page_at_lsn(src, blknum, Version::Lsn(self.lsn), self.ctx)
493 0 : .await
494 0 : .map_err(|e| BasebackupError::Server(e.into()))?;
495 0 : segment_data.extend_from_slice(&img[..]);
496 : }
497 :
498 0 : let file_name = dst.to_segfile_name(seg as u32);
499 0 : let header = new_tar_header(&file_name, segment_data.len() as u64)?;
500 0 : self.ar
501 0 : .append(&header, segment_data.as_slice())
502 0 : .await
503 0 : .map_err(BasebackupError::Client)?;
504 :
505 0 : seg += 1;
506 0 : startblk = endblk;
507 : }
508 :
509 0 : Ok(())
510 0 : }
511 :
512 : //
513 : // Include database/tablespace directories.
514 : //
515 : // Each directory contains a PG_VERSION file, and the default database
516 : // directories also contain pg_filenode.map files.
517 : //
518 0 : async fn add_dbdir(
519 0 : &mut self,
520 0 : spcnode: u32,
521 0 : dbnode: u32,
522 0 : has_relmap_file: bool,
523 0 : ) -> Result<(), BasebackupError> {
524 0 : let relmap_img = if has_relmap_file {
525 0 : let img = self
526 0 : .timeline
527 0 : .get_relmap_file(spcnode, dbnode, Version::Lsn(self.lsn), self.ctx)
528 0 : .await
529 0 : .map_err(|e| BasebackupError::Server(e.into()))?;
530 :
531 0 : if img.len()
532 0 : != dispatch_pgversion!(self.timeline.pg_version, pgv::bindings::SIZEOF_RELMAPFILE)
533 : {
534 0 : return Err(BasebackupError::Server(anyhow!(
535 0 : "img.len() != SIZE_OF_RELMAPFILE, img.len()={}",
536 0 : img.len(),
537 0 : )));
538 0 : }
539 0 :
540 0 : Some(img)
541 : } else {
542 0 : None
543 : };
544 :
545 0 : if spcnode == GLOBALTABLESPACE_OID {
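 : // PG_VERSION contains just the major version number; for PG 16 and later a
 : // trailing newline (\x0A) is appended.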
546 0 : let pg_version_str = match self.timeline.pg_version {
547 0 : 14 | 15 => self.timeline.pg_version.to_string(),
548 0 : ver => format!("{ver}\x0A"),
549 : };
550 0 : let header = new_tar_header("PG_VERSION", pg_version_str.len() as u64)?;
551 0 : self.ar
552 0 : .append(&header, pg_version_str.as_bytes())
553 0 : .await
554 0 : .map_err(BasebackupError::Client)?;
555 :
556 0 : info!("timeline.pg_version {}", self.timeline.pg_version);
557 :
558 0 : if let Some(img) = relmap_img {
559 : // filenode map for global tablespace
560 0 : let header = new_tar_header("global/pg_filenode.map", img.len() as u64)?;
561 0 : self.ar
562 0 : .append(&header, &img[..])
563 0 : .await
564 0 : .map_err(BasebackupError::Client)?;
565 : } else {
566 0 : warn!("global/pg_filenode.map is missing");
567 : }
568 : } else {
569 : // User defined tablespaces are not supported. However, as
570 : // a special case, if a tablespace/db directory is
571 : // completely empty, we can leave it out altogether. This
572 : // makes a base backup taken after the 'tablespace'
573 : // regression test pass, because the test drops the
574 : // tablespaces it created.
575 : //
576 : // FIXME: this wouldn't be necessary, if we handled
577 : // XLOG_TBLSPC_DROP records. But we probably should just
578 : // throw an error on CREATE TABLESPACE in the first place.
579 0 : if !has_relmap_file
580 0 : && self
581 0 : .timeline
582 0 : .list_rels(spcnode, dbnode, Version::Lsn(self.lsn), self.ctx)
583 0 : .await
584 0 : .map_err(|e| BasebackupError::Server(e.into()))?
585 0 : .is_empty()
586 : {
587 0 : return Ok(());
588 0 : }
589 0 : // User defined tablespaces are not supported
590 0 : if spcnode != DEFAULTTABLESPACE_OID {
591 0 : return Err(BasebackupError::Server(anyhow!(
592 0 : "spcnode != DEFAULTTABLESPACE_OID, spcnode={spcnode}"
593 0 : )));
594 0 : }
595 0 :
596 0 : // Append dir path for each database
597 0 : let path = format!("base/{}", dbnode);
598 0 : let header = new_tar_header_dir(&path)?;
599 0 : self.ar
600 0 : .append(&header, &mut io::empty())
601 0 : .await
602 0 : .map_err(BasebackupError::Client)?;
603 :
604 0 : if let Some(img) = relmap_img {
605 0 : let dst_path = format!("base/{}/PG_VERSION", dbnode);
606 :
607 0 : let pg_version_str = match self.timeline.pg_version {
608 0 : 14 | 15 => self.timeline.pg_version.to_string(),
609 0 : ver => format!("{ver}\x0A"),
610 : };
611 0 : let header = new_tar_header(&dst_path, pg_version_str.len() as u64)?;
612 0 : self.ar
613 0 : .append(&header, pg_version_str.as_bytes())
614 0 : .await
615 0 : .map_err(BasebackupError::Client)?;
616 :
617 0 : let relmap_path = format!("base/{}/pg_filenode.map", dbnode);
618 0 : let header = new_tar_header(&relmap_path, img.len() as u64)?;
619 0 : self.ar
620 0 : .append(&header, &img[..])
621 0 : .await
622 0 : .map_err(BasebackupError::Client)?;
623 0 : }
624 : };
625 0 : Ok(())
626 0 : }
627 :
628 : //
629 : // Extract twophase state files
630 : //
631 0 : async fn add_twophase_file(&mut self, xid: u64) -> Result<(), BasebackupError> {
632 0 : let img = self
633 0 : .timeline
634 0 : .get_twophase_file(xid, self.lsn, self.ctx)
635 0 : .await
636 0 : .map_err(|e| BasebackupError::Server(e.into()))?;
637 :
638 0 : let mut buf = BytesMut::new();
639 0 : buf.extend_from_slice(&img[..]);
640 0 : let crc = crc32c::crc32c(&img[..]);
641 0 : buf.put_u32_le(crc);
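 : // Two-phase state files are named after the XID: 8 hex digits for PG < 17,
 : // 16 hex digits (a 64-bit FullTransactionId) from PG 17 onwards.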
642 0 : let path = if self.timeline.pg_version < 17 {
643 0 : format!("pg_twophase/{:>08X}", xid)
644 : } else {
645 0 : format!("pg_twophase/{:>016X}", xid)
646 : };
647 0 : let header = new_tar_header(&path, buf.len() as u64)?;
648 0 : self.ar
649 0 : .append(&header, &buf[..])
650 0 : .await
651 0 : .map_err(BasebackupError::Client)?;
652 :
653 0 : Ok(())
654 0 : }
655 :
656 : //
657 : // Add generated pg_control file and bootstrap WAL segment.
658 : // Also send zenith.signal file with extra bootstrap data.
659 : //
660 0 : async fn add_pgcontrol_file(&mut self) -> Result<(), BasebackupError> {
661 0 : // add zenith.signal file
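 : // Its contents are one of "PREV LSN: none", "PREV LSN: invalid" or
 : // "PREV LSN: <lsn>"; the compute reads this during startup (see the
 : // comment at the top of send_basebackup_tarball).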
662 0 : let mut zenith_signal = String::new();
663 0 : if self.prev_record_lsn == Lsn(0) {
664 0 : if self.timeline.is_ancestor_lsn(self.lsn) {
665 0 : write!(zenith_signal, "PREV LSN: none")
666 0 : .map_err(|e| BasebackupError::Server(e.into()))?;
667 : } else {
668 0 : write!(zenith_signal, "PREV LSN: invalid")
669 0 : .map_err(|e| BasebackupError::Server(e.into()))?;
670 : }
671 : } else {
672 0 : write!(zenith_signal, "PREV LSN: {}", self.prev_record_lsn)
673 0 : .map_err(|e| BasebackupError::Server(e.into()))?;
674 : }
675 0 : self.ar
676 0 : .append(
677 0 : &new_tar_header("zenith.signal", zenith_signal.len() as u64)?,
678 0 : zenith_signal.as_bytes(),
679 : )
680 0 : .await
681 0 : .map_err(BasebackupError::Client)?;
682 :
683 0 : let checkpoint_bytes = self
684 0 : .timeline
685 0 : .get_checkpoint(self.lsn, self.ctx)
686 0 : .await
687 0 : .context("failed to get checkpoint bytes")?;
688 0 : let pg_control_bytes = self
689 0 : .timeline
690 0 : .get_control_file(self.lsn, self.ctx)
691 0 : .await
692 0 : .context("failed get control bytes")?;
693 :
694 0 : let (pg_control_bytes, system_identifier) = postgres_ffi::generate_pg_control(
695 0 : &pg_control_bytes,
696 0 : &checkpoint_bytes,
697 0 : self.lsn,
698 0 : self.timeline.pg_version,
699 0 : )?;
700 :
701 : // Send pg_control.
702 0 : let header = new_tar_header("global/pg_control", pg_control_bytes.len() as u64)?;
703 0 : self.ar
704 0 : .append(&header, &pg_control_bytes[..])
705 0 : .await
706 0 : .map_err(BasebackupError::Client)?;
707 :
708 : // Send a dummy WAL segment, so that the compute has a valid segment to start writing WAL into.
709 0 : let segno = self.lsn.segment_number(WAL_SEGMENT_SIZE);
710 0 : let wal_file_name = XLogFileName(PG_TLI, segno, WAL_SEGMENT_SIZE);
711 0 : let wal_file_path = format!("pg_wal/{}", wal_file_name);
712 0 : let header = new_tar_header(&wal_file_path, WAL_SEGMENT_SIZE as u64)?;
713 :
714 0 : let wal_seg = postgres_ffi::generate_wal_segment(
715 0 : segno,
716 0 : system_identifier,
717 0 : self.timeline.pg_version,
718 0 : self.lsn,
719 0 : )
720 0 : .map_err(|e| anyhow!(e).context("Failed generating wal segment"))?;
721 0 : if wal_seg.len() != WAL_SEGMENT_SIZE {
722 0 : return Err(BasebackupError::Server(anyhow!(
723 0 : "wal_seg.len() != WAL_SEGMENT_SIZE, wal_seg.len()={}",
724 0 : wal_seg.len()
725 0 : )));
726 0 : }
727 0 : self.ar
728 0 : .append(&header, &wal_seg[..])
729 0 : .await
730 0 : .map_err(BasebackupError::Client)?;
731 0 : Ok(())
732 0 : }
733 : }
734 :
735 : //
736 : // Create new tarball entry header
737 : //
738 0 : fn new_tar_header(path: &str, size: u64) -> anyhow::Result<Header> {
739 0 : let mut header = Header::new_gnu();
740 0 : header.set_size(size);
741 0 : header.set_path(path)?;
742 0 : header.set_mode(0o600); // -rw-------
743 0 : header.set_mtime(
744 0 : // use the current time as the last modified time
745 0 : SystemTime::now()
746 0 : .duration_since(SystemTime::UNIX_EPOCH)
747 0 : .unwrap()
748 0 : .as_secs(),
749 0 : );
750 0 : header.set_cksum();
751 0 : Ok(header)
752 0 : }
753 :
754 0 : fn new_tar_header_dir(path: &str) -> anyhow::Result<Header> {
755 0 : let mut header = Header::new_gnu();
756 0 : header.set_size(0);
757 0 : header.set_path(path)?;
758 0 : header.set_mode(0o755); // drwxr-xr-x
759 0 : header.set_entry_type(EntryType::dir());
760 0 : header.set_mtime(
761 0 : // use currenttime as last modified time
762 0 : SystemTime::now()
763 0 : .duration_since(SystemTime::UNIX_EPOCH)
764 0 : .unwrap()
765 0 : .as_secs(),
766 0 : );
767 0 : header.set_cksum();
768 0 : Ok(header)
769 0 : }