//!
//! Generate a tarball with files needed to bootstrap ComputeNode.
//!
//! TODO: this module has nothing to do with PostgreSQL pg_basebackup.
//! It could use a better name.
//!
//! A stateless Postgres compute node is launched by sending it a tarball
//! that contains non-relational data (multixacts, clog, filenode maps, twophase files),
//! a generated pg_control file, and a dummy WAL segment.
//! This module is responsible for creating such a tarball
//! from data stored in object storage.
//!
use anyhow::{anyhow, Context};
use bytes::{BufMut, Bytes, BytesMut};
use fail::fail_point;
use pageserver_api::key::Key;
use postgres_ffi::pg_constants;
use std::fmt::Write as FmtWrite;
use std::time::SystemTime;
use tokio::io;
use tokio::io::AsyncWrite;
use tracing::*;

use tokio_tar::{Builder, EntryType, Header};

use crate::context::RequestContext;
use crate::pgdatadir_mapping::Version;
use crate::tenant::Timeline;
use pageserver_api::reltag::{RelTag, SlruKind};

use postgres_ffi::dispatch_pgversion;
use postgres_ffi::pg_constants::{DEFAULTTABLESPACE_OID, GLOBALTABLESPACE_OID};
use postgres_ffi::pg_constants::{PGDATA_SPECIAL_FILES, PG_HBA};
use postgres_ffi::relfile_utils::{INIT_FORKNUM, MAIN_FORKNUM};
use postgres_ffi::XLogFileName;
use postgres_ffi::PG_TLI;
use postgres_ffi::{BLCKSZ, RELSEG_SIZE, WAL_SEGMENT_SIZE};
use utils::lsn::Lsn;
#[derive(Debug, thiserror::Error)]
pub enum BasebackupError {
    #[error("basebackup pageserver error {0:#}")]
    Server(#[from] anyhow::Error),
    #[error("basebackup client error {0:#}")]
    Client(#[source] io::Error),
}

/// Create a basebackup with non-rel data in it.
/// Relational data is only included if 'full_backup' is true.
///
/// Currently we use an empty 'req_lsn' in two cases:
///  * During the basebackup right after timeline creation
///  * When working without safekeepers. In this situation it is important to match the lsn
///    we are taking the basebackup at with the lsn that is used in the pageserver's walreceiver
///    to start replication.
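///
/// # Example
///
/// A minimal caller sketch, not taken from this codebase: it assumes a
/// `timeline` and `ctx` obtained from the surrounding pageserver code, and
/// streams the tarball into an in-memory duplex pipe.
///
/// ```ignore
/// let (_reader, mut writer) = tokio::io::duplex(64 * 1024);
/// // Basebackup at the end of the timeline, without relational data.
/// send_basebackup_tarball(&mut writer, &timeline, None, None, false, &ctx).await?;
/// ```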
pub async fn send_basebackup_tarball<'a, W>(
    write: &'a mut W,
    timeline: &'a Timeline,
    req_lsn: Option<Lsn>,
    prev_lsn: Option<Lsn>,
    full_backup: bool,
    ctx: &'a RequestContext,
) -> Result<(), BasebackupError>
where
    W: AsyncWrite + Send + Sync + Unpin,
{
    // Compute postgres doesn't have any previous WAL files, but the first
    // record that it's going to write needs to include the LSN of the
    // previous record (xl_prev). We include prev_record_lsn in the
    // "zenith.signal" file, so that postgres can read it during startup.
    //
    // We don't keep a full history of record boundaries in the page server,
    // however; only the predecessor of the latest record on each
    // timeline. So we can only provide prev_record_lsn when you take a
    // base backup at the end of the timeline, i.e. at last_record_lsn.
    // Even at the end of the timeline, we sometimes don't have a valid
    // prev_lsn value; that happens if the timeline was just branched from
    // an old LSN and it doesn't have any WAL of its own yet. We will set
    // prev_lsn to Lsn(0) if we cannot provide the correct value.
    let (backup_prev, backup_lsn) = if let Some(req_lsn) = req_lsn {
        // Backup was requested at a particular LSN. The caller should've
        // already checked that it's a valid LSN.

        // If the requested point is the end of the timeline, we can
        // provide prev_lsn. (get_last_record_rlsn() might return it as
        // zero, though, if no WAL has been generated on this timeline
        // yet.)
        let end_of_timeline = timeline.get_last_record_rlsn();
        if req_lsn == end_of_timeline.last {
            (end_of_timeline.prev, req_lsn)
        } else {
            (Lsn(0), req_lsn)
        }
    } else {
        // Backup was requested at the end of the timeline.
        let end_of_timeline = timeline.get_last_record_rlsn();
        (end_of_timeline.prev, end_of_timeline.last)
    };

    // Consolidate the derived and the provided prev_lsn values
    let prev_lsn = if let Some(provided_prev_lsn) = prev_lsn {
        if backup_prev != Lsn(0) && backup_prev != provided_prev_lsn {
            return Err(BasebackupError::Server(anyhow!(
                "backup_prev {backup_prev} != provided_prev_lsn {provided_prev_lsn}"
            )));
        }
        provided_prev_lsn
    } else {
        backup_prev
    };

    info!(
        "taking basebackup lsn={}, prev_lsn={} (full_backup={})",
        backup_lsn, prev_lsn, full_backup
    );

    let basebackup = Basebackup {
        ar: Builder::new_non_terminated(write),
        timeline,
        lsn: backup_lsn,
        prev_record_lsn: prev_lsn,
        full_backup,
        ctx,
    };
    basebackup
        .send_tarball()
        .instrument(info_span!("send_tarball", backup_lsn=%backup_lsn))
        .await
}

/// This is a short-lived object, existing only for the duration of tarball
/// creation. It exists mostly to avoid passing a lot of parameters between the
/// various functions used to construct the tarball.
struct Basebackup<'a, W>
where
    W: AsyncWrite + Send + Sync + Unpin,
{
    ar: Builder<&'a mut W>,
    timeline: &'a Timeline,
    lsn: Lsn,
    prev_record_lsn: Lsn,
    full_backup: bool,
    ctx: &'a RequestContext,
}

/// A sink that accepts SLRU blocks ordered by key and forwards
/// full segments to the archive.
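///
/// A sketch of the intended calling protocol, mirroring how `send_tarball`
/// drives this type below (`ar` and `blocks` stand in for the archive
/// builder and the vectored read results):
///
/// ```ignore
/// let mut slru_builder = SlruSegmentsBuilder::new(&mut ar);
/// for (key, block) in blocks {
///     // Blocks must arrive ordered by key, so segment changes are detected.
///     slru_builder.add_block(&key, block).await?;
/// }
/// // Flush the last partially accumulated segment, if any.
/// slru_builder.finish().await?;
/// ```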
struct SlruSegmentsBuilder<'a, 'b, W>
where
    W: AsyncWrite + Send + Sync + Unpin,
{
    ar: &'a mut Builder<&'b mut W>,
    buf: Vec<u8>,
    current_segment: Option<(SlruKind, u32)>,
    total_blocks: usize,
}

impl<'a, 'b, W> SlruSegmentsBuilder<'a, 'b, W>
where
    W: AsyncWrite + Send + Sync + Unpin,
{
    fn new(ar: &'a mut Builder<&'b mut W>) -> Self {
        Self {
            ar,
            buf: Vec::new(),
            current_segment: None,
            total_blocks: 0,
        }
    }

    async fn add_block(&mut self, key: &Key, block: Bytes) -> Result<(), BasebackupError> {
        let (kind, segno, _) = key.to_slru_block()?;

        match kind {
            SlruKind::Clog => {
                if !(block.len() == BLCKSZ as usize || block.len() == BLCKSZ as usize + 8) {
                    return Err(BasebackupError::Server(anyhow!(
                        "invalid SlruKind::Clog record: block.len()={}",
                        block.len()
                    )));
                }
            }
            SlruKind::MultiXactMembers | SlruKind::MultiXactOffsets => {
                if block.len() != BLCKSZ as usize {
                    return Err(BasebackupError::Server(anyhow!(
                        "invalid {:?} record: block.len()={}",
                        kind,
                        block.len()
                    )));
                }
            }
        }

        let segment = (kind, segno);
        match self.current_segment {
            None => {
                self.current_segment = Some(segment);
                self.buf
                    .extend_from_slice(block.slice(..BLCKSZ as usize).as_ref());
            }
            Some(current_seg) if current_seg == segment => {
                self.buf
                    .extend_from_slice(block.slice(..BLCKSZ as usize).as_ref());
            }
            Some(_) => {
                self.flush().await?;

                self.current_segment = Some(segment);
                self.buf
                    .extend_from_slice(block.slice(..BLCKSZ as usize).as_ref());
            }
        }

        Ok(())
    }

    async fn flush(&mut self) -> Result<(), BasebackupError> {
        let nblocks = self.buf.len() / BLCKSZ as usize;
        let (kind, segno) = self.current_segment.take().unwrap();
        let segname = format!("{}/{:>04X}", kind.to_str(), segno);
        let header = new_tar_header(&segname, self.buf.len() as u64)?;
        self.ar
            .append(&header, self.buf.as_slice())
            .await
            .map_err(BasebackupError::Client)?;

        self.total_blocks += nblocks;
        debug!("Added to basebackup slru {} relsize {}", segname, nblocks);

        self.buf.clear();

        Ok(())
    }

    async fn finish(mut self) -> Result<(), BasebackupError> {
        let res = if self.current_segment.is_none() || self.buf.is_empty() {
            Ok(())
        } else {
            self.flush().await
        };

        info!("Collected {} SLRU blocks", self.total_blocks);

        res
    }
}

impl<'a, W> Basebackup<'a, W>
where
    W: AsyncWrite + Send + Sync + Unpin,
{
    async fn send_tarball(mut self) -> Result<(), BasebackupError> {
        // TODO include checksum

        let lazy_slru_download = self.timeline.get_lazy_slru_download() && !self.full_backup;

        let pgversion = self.timeline.pg_version;
        let subdirs = dispatch_pgversion!(pgversion, &pgv::bindings::PGDATA_SUBDIRS[..]);

        // Create pgdata subdirs structure
        for dir in subdirs.iter() {
            let header = new_tar_header_dir(dir)?;
            self.ar
                .append(&header, &mut io::empty())
                .await
                .context("could not add directory to basebackup tarball")?;
        }

        // Send config files.
        for filepath in PGDATA_SPECIAL_FILES.iter() {
            if *filepath == "pg_hba.conf" {
                let data = PG_HBA.as_bytes();
                let header = new_tar_header(filepath, data.len() as u64)?;
                self.ar
                    .append(&header, data)
                    .await
                    .context("could not add config file to basebackup tarball")?;
            } else {
                let header = new_tar_header(filepath, 0)?;
                self.ar
                    .append(&header, &mut io::empty())
                    .await
                    .context("could not add config file to basebackup tarball")?;
            }
        }
        if !lazy_slru_download {
            // Gather non-relational files from object storage pages.
            let slru_partitions = self
                .timeline
                .get_slru_keyspace(Version::Lsn(self.lsn), self.ctx)
                .await
                .map_err(|e| BasebackupError::Server(e.into()))?
                .partition(
                    self.timeline.get_shard_identity(),
                    Timeline::MAX_GET_VECTORED_KEYS * BLCKSZ as u64,
                );

            let mut slru_builder = SlruSegmentsBuilder::new(&mut self.ar);

            for part in slru_partitions.parts {
                let blocks = self
                    .timeline
                    .get_vectored(part, self.lsn, self.ctx)
                    .await
                    .map_err(|e| BasebackupError::Server(e.into()))?;

                for (key, block) in blocks {
                    let block = block.map_err(|e| BasebackupError::Server(e.into()))?;
                    slru_builder.add_block(&key, block).await?;
                }
            }
            slru_builder.finish().await?;
        }

        let mut min_restart_lsn: Lsn = Lsn::MAX;
        // Create tablespace directories
        for ((spcnode, dbnode), has_relmap_file) in self
            .timeline
            .list_dbdirs(self.lsn, self.ctx)
            .await
            .map_err(|e| BasebackupError::Server(e.into()))?
        {
            self.add_dbdir(spcnode, dbnode, has_relmap_file).await?;

            // If full backup is requested, include all relation files.
            // Otherwise only include init forks of unlogged relations.
            let rels = self
                .timeline
                .list_rels(spcnode, dbnode, Version::Lsn(self.lsn), self.ctx)
                .await
                .map_err(|e| BasebackupError::Server(e.into()))?;
            for &rel in rels.iter() {
                // Send the init fork as the main fork to provide well-formed empty
                // contents of UNLOGGED relations. Postgres copies it in
                // `reinit.c` during recovery.
                if rel.forknum == INIT_FORKNUM {
                    // We probably don't need the _init fork itself, but having it
                    // at least serves as a marker that the relation is unlogged.
                    self.add_rel(rel, rel).await?;
                    self.add_rel(rel, rel.with_forknum(MAIN_FORKNUM)).await?;
                    continue;
                }

                if self.full_backup {
                    if rel.forknum == MAIN_FORKNUM && rels.contains(&rel.with_forknum(INIT_FORKNUM))
                    {
                        // Skip this; it will be included when we reach the init fork.
                        continue;
                    }
                    self.add_rel(rel, rel).await?;
                }
            }
        }

        for (path, content) in self
            .timeline
            .list_aux_files(self.lsn, self.ctx)
            .await
            .map_err(|e| BasebackupError::Server(e.into()))?
        {
            if path.starts_with("pg_replslot") {
                let offs = pg_constants::REPL_SLOT_ON_DISK_OFFSETOF_RESTART_LSN;
                let restart_lsn = Lsn(u64::from_le_bytes(
                    content[offs..offs + 8].try_into().unwrap(),
                ));
                info!("Replication slot {} restart LSN={}", path, restart_lsn);
                min_restart_lsn = Lsn::min(min_restart_lsn, restart_lsn);
            } else if path == "pg_logical/replorigin_checkpoint" {
                // replorigin_checkpoint is written only on compute shutdown, so it contains
                // stale values. We generate our own version of this file for the particular LSN,
                // based on information about replication origins extracted from transaction
                // commit records. In the future we will not generate an AUX record for
                // "pg_logical/replorigin_checkpoint" at all, but for now we must handle
                // (skip) it for backward compatibility.
                continue;
            }
            let header = new_tar_header(&path, content.len() as u64)?;
            self.ar
                .append(&header, &*content)
                .await
                .context("could not add aux file to basebackup tarball")?;
        }

        if min_restart_lsn != Lsn::MAX {
            info!(
                "Min restart LSN for logical replication is {}",
                min_restart_lsn
            );
            let data = min_restart_lsn.0.to_le_bytes();
            let header = new_tar_header("restart.lsn", data.len() as u64)?;
            self.ar
                .append(&header, &data[..])
                .await
                .context("could not add restart.lsn file to basebackup tarball")?;
        }
        for xid in self
            .timeline
            .list_twophase_files(self.lsn, self.ctx)
            .await
            .map_err(|e| BasebackupError::Server(e.into()))?
        {
            self.add_twophase_file(xid).await?;
        }
        let repl_origins = self
            .timeline
            .get_replorigins(self.lsn, self.ctx)
            .await
            .map_err(|e| BasebackupError::Server(e.into()))?;
        let n_origins = repl_origins.len();
        if n_origins != 0 {
            //
            // Construct the "pg_logical/replorigin_checkpoint" file based on information about
            // replication origins extracted from transaction commit records. We use this file
            // to pass replication origin state to compute, allowing logical replication to
            // restart from the proper point.
            //
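            // File layout, as assembled below (all integers little-endian):
            //   u32 REPLICATION_STATE_MAGIC
            //   per origin: origin id, padding to an 8-byte boundary,
            //               u64 origin LSN
            //   u32 crc32c of everything above
            //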
            let mut content = Vec::with_capacity(n_origins * 16 + 8);
            content.extend_from_slice(&pg_constants::REPLICATION_STATE_MAGIC.to_le_bytes());
            for (origin_id, origin_lsn) in repl_origins {
                content.extend_from_slice(&origin_id.to_le_bytes());
                content.extend_from_slice(&[0u8; 6]); // align to 8 bytes
                content.extend_from_slice(&origin_lsn.0.to_le_bytes());
            }
            let crc32 = crc32c::crc32c(&content);
            content.extend_from_slice(&crc32.to_le_bytes());
            let header = new_tar_header("pg_logical/replorigin_checkpoint", content.len() as u64)?;
            self.ar.append(&header, &*content).await.context(
                "could not add pg_logical/replorigin_checkpoint file to basebackup tarball",
            )?;
        }

        fail_point!("basebackup-before-control-file", |_| {
            Err(BasebackupError::Server(anyhow!(
                "failpoint basebackup-before-control-file"
            )))
        });

        // Generate pg_control and bootstrap WAL segment.
        self.add_pgcontrol_file().await?;
        self.ar.finish().await.map_err(BasebackupError::Client)?;
        debug!("all tarred up!");
        Ok(())
    }

    /// Add contents of relfilenode `src`, naming it as `dst`.
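    ///
    /// Relations are split into segment files of `RELSEG_SIZE` blocks each,
    /// mirroring the Postgres on-disk layout: with the standard BLCKSZ of
    /// 8192 bytes and RELSEG_SIZE of 131072 blocks, each segment file holds
    /// at most 131072 * 8192 bytes = 1 GiB.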
    async fn add_rel(&mut self, src: RelTag, dst: RelTag) -> Result<(), BasebackupError> {
        let nblocks = self
            .timeline
            .get_rel_size(src, Version::Lsn(self.lsn), self.ctx)
            .await
            .map_err(|e| BasebackupError::Server(e.into()))?;

        // If the relation is empty, create an empty file
        if nblocks == 0 {
            let file_name = dst.to_segfile_name(0);
            let header = new_tar_header(&file_name, 0)?;
            self.ar
                .append(&header, &mut io::empty())
                .await
                .map_err(BasebackupError::Client)?;
            return Ok(());
        }

        // Add a file for each chunk of blocks (aka segment)
        let mut startblk = 0;
        let mut seg = 0;
        while startblk < nblocks {
            let endblk = std::cmp::min(startblk + RELSEG_SIZE, nblocks);

            let mut segment_data: Vec<u8> = vec![];
            for blknum in startblk..endblk {
                let img = self
                    .timeline
                    .get_rel_page_at_lsn(src, blknum, Version::Lsn(self.lsn), self.ctx)
                    .await
                    .map_err(|e| BasebackupError::Server(e.into()))?;
                segment_data.extend_from_slice(&img[..]);
            }

            let file_name = dst.to_segfile_name(seg as u32);
            let header = new_tar_header(&file_name, segment_data.len() as u64)?;
            self.ar
                .append(&header, segment_data.as_slice())
                .await
                .map_err(BasebackupError::Client)?;

            seg += 1;
            startblk = endblk;
        }

        Ok(())
    }

    //
    // Include database/tablespace directories.
    //
    // Each directory contains a PG_VERSION file, and the default database
    // directories also contain pg_filenode.map files.
    //
    async fn add_dbdir(
        &mut self,
        spcnode: u32,
        dbnode: u32,
        has_relmap_file: bool,
    ) -> Result<(), BasebackupError> {
        let relmap_img = if has_relmap_file {
            let img = self
                .timeline
                .get_relmap_file(spcnode, dbnode, Version::Lsn(self.lsn), self.ctx)
                .await
                .map_err(|e| BasebackupError::Server(e.into()))?;

            if img.len()
                != dispatch_pgversion!(self.timeline.pg_version, pgv::bindings::SIZEOF_RELMAPFILE)
            {
                return Err(BasebackupError::Server(anyhow!(
                    "img.len() != SIZEOF_RELMAPFILE, img.len()={}",
                    img.len(),
                )));
            }

            Some(img)
        } else {
            None
        };

        if spcnode == GLOBALTABLESPACE_OID {
            let pg_version_str = match self.timeline.pg_version {
                14 | 15 => self.timeline.pg_version.to_string(),
                ver => format!("{ver}\x0A"),
            };
            let header = new_tar_header("PG_VERSION", pg_version_str.len() as u64)?;
            self.ar
                .append(&header, pg_version_str.as_bytes())
                .await
                .map_err(BasebackupError::Client)?;

            info!("timeline.pg_version {}", self.timeline.pg_version);

            if let Some(img) = relmap_img {
                // filenode map for global tablespace
                let header = new_tar_header("global/pg_filenode.map", img.len() as u64)?;
                self.ar
                    .append(&header, &img[..])
                    .await
                    .map_err(BasebackupError::Client)?;
            } else {
                warn!("global/pg_filenode.map is missing");
            }
        } else {
            // User-defined tablespaces are not supported. However, as
            // a special case, if a tablespace/db directory is
            // completely empty, we can leave it out altogether. This
            // makes taking a base backup after the 'tablespace'
            // regression test pass, because the test drops the
            // created tablespaces after the tests.
            //
            // FIXME: this wouldn't be necessary if we handled
            // XLOG_TBLSPC_DROP records. But we probably should just
            // throw an error on CREATE TABLESPACE in the first place.
            if !has_relmap_file
                && self
                    .timeline
                    .list_rels(spcnode, dbnode, Version::Lsn(self.lsn), self.ctx)
                    .await
                    .map_err(|e| BasebackupError::Server(e.into()))?
                    .is_empty()
            {
                return Ok(());
            }
            // User-defined tablespaces are not supported
            if spcnode != DEFAULTTABLESPACE_OID {
                return Err(BasebackupError::Server(anyhow!(
                    "spcnode != DEFAULTTABLESPACE_OID, spcnode={spcnode}"
                )));
            }

            // Append dir path for each database
            let path = format!("base/{}", dbnode);
            let header = new_tar_header_dir(&path)?;
            self.ar
                .append(&header, &mut io::empty())
                .await
                .map_err(BasebackupError::Client)?;

            if let Some(img) = relmap_img {
                let dst_path = format!("base/{}/PG_VERSION", dbnode);

                let pg_version_str = match self.timeline.pg_version {
                    14 | 15 => self.timeline.pg_version.to_string(),
                    ver => format!("{ver}\x0A"),
                };
                let header = new_tar_header(&dst_path, pg_version_str.len() as u64)?;
                self.ar
                    .append(&header, pg_version_str.as_bytes())
                    .await
                    .map_err(BasebackupError::Client)?;

                let relmap_path = format!("base/{}/pg_filenode.map", dbnode);
                let header = new_tar_header(&relmap_path, img.len() as u64)?;
                self.ar
                    .append(&header, &img[..])
                    .await
                    .map_err(BasebackupError::Client)?;
            }
        };
        Ok(())
    }

    //
    // Extract twophase state files
    //
    async fn add_twophase_file(&mut self, xid: u64) -> Result<(), BasebackupError> {
        let img = self
            .timeline
            .get_twophase_file(xid, self.lsn, self.ctx)
            .await
            .map_err(|e| BasebackupError::Server(e.into()))?;

        let mut buf = BytesMut::new();
        buf.extend_from_slice(&img[..]);
        let crc = crc32c::crc32c(&img[..]);
        buf.put_u32_le(crc);
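        // Postgres 17 switched pg_twophase file names from 8 hex digits
        // (32-bit TransactionId) to 16 hex digits (64-bit FullTransactionId),
        // hence the version-dependent formatting below.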
        let path = if self.timeline.pg_version < 17 {
            format!("pg_twophase/{:>08X}", xid)
        } else {
            format!("pg_twophase/{:>016X}", xid)
        };
        let header = new_tar_header(&path, buf.len() as u64)?;
        self.ar
            .append(&header, &buf[..])
            .await
            .map_err(BasebackupError::Client)?;

        Ok(())
    }

    //
    // Add generated pg_control file and bootstrap WAL segment.
    // Also send zenith.signal file with extra bootstrap data.
    //
    async fn add_pgcontrol_file(&mut self) -> Result<(), BasebackupError> {
        // add zenith.signal file
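        // The file contains a single line: "PREV LSN: none", "PREV LSN: invalid",
        // or "PREV LSN: <lsn>", which compute reads on startup to fill in the
        // xl_prev of the first WAL record it writes.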
        let mut zenith_signal = String::new();
        if self.prev_record_lsn == Lsn(0) {
            if self.timeline.is_ancestor_lsn(self.lsn) {
                write!(zenith_signal, "PREV LSN: none")
                    .map_err(|e| BasebackupError::Server(e.into()))?;
            } else {
                write!(zenith_signal, "PREV LSN: invalid")
                    .map_err(|e| BasebackupError::Server(e.into()))?;
            }
        } else {
            write!(zenith_signal, "PREV LSN: {}", self.prev_record_lsn)
                .map_err(|e| BasebackupError::Server(e.into()))?;
        }
        self.ar
            .append(
                &new_tar_header("zenith.signal", zenith_signal.len() as u64)?,
                zenith_signal.as_bytes(),
            )
            .await
            .map_err(BasebackupError::Client)?;

        let checkpoint_bytes = self
            .timeline
            .get_checkpoint(self.lsn, self.ctx)
            .await
            .context("failed to get checkpoint bytes")?;
        let pg_control_bytes = self
            .timeline
            .get_control_file(self.lsn, self.ctx)
            .await
            .context("failed to get control bytes")?;

        let (pg_control_bytes, system_identifier) = postgres_ffi::generate_pg_control(
            &pg_control_bytes,
            &checkpoint_bytes,
            self.lsn,
            self.timeline.pg_version,
        )?;

        // send pg_control
        let header = new_tar_header("global/pg_control", pg_control_bytes.len() as u64)?;
        self.ar
            .append(&header, &pg_control_bytes[..])
            .await
            .map_err(BasebackupError::Client)?;

        // send WAL segment
        let segno = self.lsn.segment_number(WAL_SEGMENT_SIZE);
        let wal_file_name = XLogFileName(PG_TLI, segno, WAL_SEGMENT_SIZE);
        let wal_file_path = format!("pg_wal/{}", wal_file_name);
        let header = new_tar_header(&wal_file_path, WAL_SEGMENT_SIZE as u64)?;

        let wal_seg = postgres_ffi::generate_wal_segment(
            segno,
            system_identifier,
            self.timeline.pg_version,
            self.lsn,
        )
        .map_err(|e| anyhow!(e).context("Failed generating wal segment"))?;
        if wal_seg.len() != WAL_SEGMENT_SIZE {
            return Err(BasebackupError::Server(anyhow!(
                "wal_seg.len() != WAL_SEGMENT_SIZE, wal_seg.len()={}",
                wal_seg.len()
            )));
        }
        self.ar
            .append(&header, &wal_seg[..])
            .await
            .map_err(BasebackupError::Client)?;
        Ok(())
    }
}

//
// Create new tarball entry header
//
fn new_tar_header(path: &str, size: u64) -> anyhow::Result<Header> {
    let mut header = Header::new_gnu();
    header.set_size(size);
    header.set_path(path)?;
    header.set_mode(0b110000000); // -rw------- (0o600)
    header.set_mtime(
        // use current time as last modified time
        SystemTime::now()
            .duration_since(SystemTime::UNIX_EPOCH)
            .unwrap()
            .as_secs(),
    );
    header.set_cksum();
    Ok(header)
}

fn new_tar_header_dir(path: &str) -> anyhow::Result<Header> {
    let mut header = Header::new_gnu();
    header.set_size(0);
    header.set_path(path)?;
    header.set_mode(0o755); // drwxr-xr-x
    header.set_entry_type(EntryType::dir());
    header.set_mtime(
        // use current time as last modified time
        SystemTime::now()
            .duration_since(SystemTime::UNIX_EPOCH)
            .unwrap()
            .as_secs(),
    );
    header.set_cksum();
    Ok(header)
}