//!
//! Generate a tarball with files needed to bootstrap ComputeNode.
//!
//! TODO: this module has nothing to do with PostgreSQL pg_basebackup.
//! It could use a better name.
//!
//! A stateless Postgres compute node is launched by sending it a tarball
//! that contains non-relational data (multixacts, clog, filenodemaps, twophase files),
//! a generated pg_control file, and a dummy WAL segment.
//! This module is responsible for creating such a tarball from data stored
//! in object storage.
//!
use anyhow::{anyhow, bail, ensure, Context};
use bytes::{BufMut, Bytes, BytesMut};
use fail::fail_point;
use pageserver_api::key::{key_to_slru_block, Key};
use postgres_ffi::pg_constants;
use std::fmt::Write as FmtWrite;
use std::time::SystemTime;
use tokio::io;
use tokio::io::AsyncWrite;
use tracing::*;

use tokio_tar::{Builder, EntryType, Header};

use crate::context::RequestContext;
use crate::pgdatadir_mapping::Version;
use crate::tenant::Timeline;
use pageserver_api::reltag::{RelTag, SlruKind};

use postgres_ffi::dispatch_pgversion;
use postgres_ffi::pg_constants::{DEFAULTTABLESPACE_OID, GLOBALTABLESPACE_OID};
use postgres_ffi::pg_constants::{PGDATA_SPECIAL_FILES, PGDATA_SUBDIRS, PG_HBA};
use postgres_ffi::relfile_utils::{INIT_FORKNUM, MAIN_FORKNUM};
use postgres_ffi::TransactionId;
use postgres_ffi::XLogFileName;
use postgres_ffi::PG_TLI;
use postgres_ffi::{BLCKSZ, RELSEG_SIZE, WAL_SEGMENT_SIZE};
use utils::lsn::Lsn;

/// Create a basebackup with non-rel data in it.
/// Relational data is only included if 'full_backup' is true.
///
/// Currently we pass 'None' as 'req_lsn' in two cases:
/// * During the basebackup right after timeline creation
/// * When working without safekeepers. In this situation it is important to match the LSN
///   we are taking the basebackup at with the LSN that is used in the pageserver's walreceiver
///   to start replication.
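///
/// A minimal calling sketch (doc-only; it assumes a `timeline` and `ctx` are
/// already in scope, and uses a tokio duplex stream as a stand-in for the real
/// client connection):
///
/// ```ignore
/// let (mut client_end, mut server_end) = tokio::io::duplex(64 * 1024);
/// // Stream the tarball at the current end of the timeline (req_lsn = None).
/// send_basebackup_tarball(&mut server_end, &timeline, None, None, false, &ctx).await?;
/// ```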
pub async fn send_basebackup_tarball<'a, W>(
    write: &'a mut W,
    timeline: &'a Timeline,
    req_lsn: Option<Lsn>,
    prev_lsn: Option<Lsn>,
    full_backup: bool,
    ctx: &'a RequestContext,
) -> anyhow::Result<()>
where
    W: AsyncWrite + Send + Sync + Unpin,
{
    // The compute node doesn't have any previous WAL files, but the first
    // record that it's going to write needs to include the LSN of the
    // previous record (xl_prev). We include prev_record_lsn in the
    // "zenith.signal" file, so that postgres can read it during startup.
    //
    // We don't keep full history of record boundaries in the page server,
    // however, only the predecessor of the latest record on each
    // timeline. So we can only provide prev_record_lsn when the base
    // backup is taken at the end of the timeline, i.e. at last_record_lsn.
    // Even at the end of the timeline, we sometimes don't have a valid
    // prev_lsn value; that happens if the timeline was just branched from
    // an old LSN and it doesn't have any WAL of its own yet. We will set
    // prev_lsn to Lsn(0) if we cannot provide the correct value.
    let (backup_prev, backup_lsn) = if let Some(req_lsn) = req_lsn {
        // Backup was requested at a particular LSN. The caller should've
        // already checked that it's a valid LSN.

        // If the requested point is the end of the timeline, we can
        // provide prev_lsn. (get_last_record_rlsn() might return it as
        // zero, though, if no WAL has been generated on this timeline
        // yet.)
        let end_of_timeline = timeline.get_last_record_rlsn();
        if req_lsn == end_of_timeline.last {
            (end_of_timeline.prev, req_lsn)
        } else {
            (Lsn(0), req_lsn)
        }
    } else {
        // Backup was requested at the end of the timeline.
        let end_of_timeline = timeline.get_last_record_rlsn();
        (end_of_timeline.prev, end_of_timeline.last)
    };

    // Consolidate the derived and the provided prev_lsn values
    let prev_lsn = if let Some(provided_prev_lsn) = prev_lsn {
        if backup_prev != Lsn(0) {
            ensure!(backup_prev == provided_prev_lsn);
        }
        provided_prev_lsn
    } else {
        backup_prev
    };

    info!(
        "taking basebackup lsn={}, prev_lsn={} (full_backup={})",
        backup_lsn, prev_lsn, full_backup
    );

    let basebackup = Basebackup {
        ar: Builder::new_non_terminated(write),
        timeline,
        lsn: backup_lsn,
        prev_record_lsn: prev_lsn,
        full_backup,
        ctx,
    };
    basebackup
        .send_tarball()
        .instrument(info_span!("send_tarball", backup_lsn=%backup_lsn))
        .await
}

/// This is a short-lived object, which exists only for the duration of tarball
/// creation. It is created mostly to avoid passing a lot of parameters between
/// the various functions used to construct the tarball.
struct Basebackup<'a, W>
where
    W: AsyncWrite + Send + Sync + Unpin,
{
    ar: Builder<&'a mut W>,
    timeline: &'a Timeline,
    lsn: Lsn,
    prev_record_lsn: Lsn,
    full_backup: bool,
    ctx: &'a RequestContext,
}

/// A sink that accepts SLRU blocks ordered by key and forwards
/// full segments to the archive.
struct SlruSegmentsBuilder<'a, 'b, W>
where
    W: AsyncWrite + Send + Sync + Unpin,
{
    ar: &'a mut Builder<&'b mut W>,
    buf: Vec<u8>,
    current_segment: Option<(SlruKind, u32)>,
}

impl<'a, 'b, W> SlruSegmentsBuilder<'a, 'b, W>
where
    W: AsyncWrite + Send + Sync + Unpin,
{
    fn new(ar: &'a mut Builder<&'b mut W>) -> Self {
        Self {
            ar,
            buf: Vec::new(),
            current_segment: None,
        }
    }

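    /// Append one SLRU block to the builder. Blocks must arrive in key order,
    /// so that all blocks of a segment are contiguous; when the (kind, segno)
    /// pair changes, the previously buffered segment is flushed to the archive.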
    async fn add_block(&mut self, key: &Key, block: Bytes) -> anyhow::Result<()> {
        let (kind, segno, _) = key_to_slru_block(*key)?;

        match kind {
            SlruKind::Clog => {
                ensure!(block.len() == BLCKSZ as usize || block.len() == BLCKSZ as usize + 8);
            }
            SlruKind::MultiXactMembers | SlruKind::MultiXactOffsets => {
                ensure!(block.len() == BLCKSZ as usize);
            }
        }

        let segment = (kind, segno);
        match self.current_segment {
            None => {
                self.current_segment = Some(segment);
                self.buf
                    .extend_from_slice(block.slice(..BLCKSZ as usize).as_ref());
            }
            Some(current_seg) if current_seg == segment => {
                self.buf
                    .extend_from_slice(block.slice(..BLCKSZ as usize).as_ref());
            }
            Some(_) => {
                self.flush().await?;

                self.current_segment = Some(segment);
                self.buf
                    .extend_from_slice(block.slice(..BLCKSZ as usize).as_ref());
            }
        }

        Ok(())
    }

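    /// Write the buffered blocks out as a single SLRU segment file in the
    /// archive (e.g. a clog segment goes under "pg_xact/"), then reset the
    /// buffer for the next segment.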
    async fn flush(&mut self) -> anyhow::Result<()> {
        let nblocks = self.buf.len() / BLCKSZ as usize;
        let (kind, segno) = self.current_segment.take().unwrap();
        let segname = format!("{}/{:>04X}", kind.to_str(), segno);
        let header = new_tar_header(&segname, self.buf.len() as u64)?;
        self.ar.append(&header, self.buf.as_slice()).await?;

        trace!("Added to basebackup slru {} relsize {}", segname, nblocks);

        self.buf.clear();

        Ok(())
    }

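    /// Flush any remaining buffered blocks and consume the builder. Called
    /// once, after the final `add_block`.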
    async fn finish(mut self) -> anyhow::Result<()> {
        if self.current_segment.is_none() || self.buf.is_empty() {
            return Ok(());
        }

        self.flush().await
    }
}

impl<'a, W> Basebackup<'a, W>
where
    W: AsyncWrite + Send + Sync + Unpin,
{
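    /// Write the complete tarball. Entries are appended in this order:
    /// the pgdata directory skeleton and config files, SLRU segments (unless
    /// lazy SLRU download is enabled), per-database directories with filenode
    /// maps and aux files (plus relation files for full backups), restart.lsn,
    /// twophase state files, and finally the generated zenith.signal,
    /// pg_control file and bootstrap WAL segment.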
    async fn send_tarball(mut self) -> anyhow::Result<()> {
        // TODO include checksum

        let lazy_slru_download = self.timeline.get_lazy_slru_download() && !self.full_backup;

        // Create pgdata subdirs structure
        for dir in PGDATA_SUBDIRS.iter() {
            let header = new_tar_header_dir(dir)?;
            self.ar
                .append(&header, &mut io::empty())
                .await
                .context("could not add directory to basebackup tarball")?;
        }

        // Send config files.
        for filepath in PGDATA_SPECIAL_FILES.iter() {
            if *filepath == "pg_hba.conf" {
                let data = PG_HBA.as_bytes();
                let header = new_tar_header(filepath, data.len() as u64)?;
                self.ar
                    .append(&header, data)
                    .await
                    .context("could not add config file to basebackup tarball")?;
            } else {
                let header = new_tar_header(filepath, 0)?;
                self.ar
                    .append(&header, &mut io::empty())
                    .await
                    .context("could not add config file to basebackup tarball")?;
            }
        }
        if !lazy_slru_download {
            // Gather non-relational files from object storage pages.
            let slru_partitions = self
                .timeline
                .get_slru_keyspace(Version::Lsn(self.lsn), self.ctx)
                .await?
                .partition(Timeline::MAX_GET_VECTORED_KEYS * BLCKSZ as u64);

            let mut slru_builder = SlruSegmentsBuilder::new(&mut self.ar);

            for part in slru_partitions.parts {
                let blocks = self
                    .timeline
                    .get_vectored(&part.ranges, self.lsn, self.ctx)
                    .await?;

                for (key, block) in blocks {
                    slru_builder.add_block(&key, block?).await?;
                }
            }
            slru_builder.finish().await?;
        }

        let mut min_restart_lsn: Lsn = Lsn::MAX;
        // Create tablespace directories
        for ((spcnode, dbnode), has_relmap_file) in
            self.timeline.list_dbdirs(self.lsn, self.ctx).await?
        {
            self.add_dbdir(spcnode, dbnode, has_relmap_file).await?;

            // If full backup is requested, include all relation files.
            // Otherwise only include init forks of unlogged relations.
            let rels = self
                .timeline
                .list_rels(spcnode, dbnode, Version::Lsn(self.lsn), self.ctx)
                .await?;
            for &rel in rels.iter() {
291 : // contents of UNLOGGED relations. Postgres copies it in
292 : // `reinit.c` during recovery.
293 569562 : if rel.forknum == INIT_FORKNUM {
294 : // I doubt we need _init fork itself, but having it at least
295 : // serves as a marker relation is unlogged.
296 2 : self.add_rel(rel, rel).await?;
                    self.add_rel(rel, rel).await?;
                    self.add_rel(rel, rel.with_forknum(MAIN_FORKNUM)).await?;
                    continue;
                }

                if self.full_backup {
                    if rel.forknum == MAIN_FORKNUM && rels.contains(&rel.with_forknum(INIT_FORKNUM))
                    {
                        // skip this, will include it when we reach the init fork
                        continue;
                    }
                    self.add_rel(rel, rel).await?;
                }
            }

            for (path, content) in self.timeline.list_aux_files(self.lsn, self.ctx).await? {
                if path.starts_with("pg_replslot") {
                    let offs = pg_constants::REPL_SLOT_ON_DISK_OFFSETOF_RESTART_LSN;
                    let restart_lsn = Lsn(u64::from_le_bytes(
                        content[offs..offs + 8].try_into().unwrap(),
                    ));
                    info!("Replication slot {} restart LSN={}", path, restart_lsn);
                    min_restart_lsn = Lsn::min(min_restart_lsn, restart_lsn);
                }
                let header = new_tar_header(&path, content.len() as u64)?;
                self.ar
                    .append(&header, &*content)
                    .await
                    .context("could not add aux file to basebackup tarball")?;
            }
        }
        if min_restart_lsn != Lsn::MAX {
            info!(
                "Min restart LSN for logical replication is {}",
                min_restart_lsn
            );
            let data = min_restart_lsn.0.to_le_bytes();
            let header = new_tar_header("restart.lsn", data.len() as u64)?;
            self.ar
                .append(&header, &data[..])
                .await
                .context("could not add restart.lsn file to basebackup tarball")?;
        }
        for xid in self
            .timeline
            .list_twophase_files(self.lsn, self.ctx)
            .await?
        {
            self.add_twophase_file(xid).await?;
        }

        fail_point!("basebackup-before-control-file", |_| {
            bail!("failpoint basebackup-before-control-file")
        });

        // Generate pg_control and bootstrap WAL segment.
        self.add_pgcontrol_file().await?;
        self.ar.finish().await?;
        debug!("all tarred up!");
        Ok(())
    }

    /// Add contents of relfilenode `src`, naming it as `dst`.
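    ///
    /// Like Postgres itself, we chop the relation into RELSEG_SIZE-block
    /// segment files; segment files past the first get a numeric suffix
    /// (per the usual Postgres segment naming convention).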
    async fn add_rel(&mut self, src: RelTag, dst: RelTag) -> anyhow::Result<()> {
        let horizon = self.lsn; // we do not need latest version
        let nblocks = self
            .timeline
            .get_rel_size(src, Version::Lsn(self.lsn), horizon, self.ctx)
            .await?;

        // If the relation is empty, create an empty file
        if nblocks == 0 {
            let file_name = dst.to_segfile_name(0);
            let header = new_tar_header(&file_name, 0)?;
            self.ar.append(&header, &mut io::empty()).await?;
            return Ok(());
        }

        // Add a file for each chunk of blocks (aka segment)
        let mut startblk = 0;
        let mut seg = 0;
        while startblk < nblocks {
            let endblk = std::cmp::min(startblk + RELSEG_SIZE, nblocks);

            let mut segment_data: Vec<u8> = vec![];
            for blknum in startblk..endblk {
                let img = self
                    .timeline
                    .get_rel_page_at_lsn(src, blknum, Version::Lsn(self.lsn), horizon, self.ctx)
                    .await?;
                segment_data.extend_from_slice(&img[..]);
            }

            let file_name = dst.to_segfile_name(seg as u32);
            let header = new_tar_header(&file_name, segment_data.len() as u64)?;
            self.ar.append(&header, segment_data.as_slice()).await?;

            seg += 1;
            startblk = endblk;
        }

        Ok(())
    }

    //
    // Include database/tablespace directories.
    //
    // Each directory contains a PG_VERSION file, and the default database
    // directories also contain pg_filenode.map files.
    //
    async fn add_dbdir(
        &mut self,
        spcnode: u32,
        dbnode: u32,
        has_relmap_file: bool,
    ) -> anyhow::Result<()> {
        let relmap_img = if has_relmap_file {
            let img = self
                .timeline
                .get_relmap_file(spcnode, dbnode, Version::Lsn(self.lsn), self.ctx)
                .await?;

            ensure!(
                img.len()
                    == dispatch_pgversion!(
                        self.timeline.pg_version,
                        pgv::bindings::SIZEOF_RELMAPFILE
                    )
            );

            Some(img)
        } else {
            None
        };

        if spcnode == GLOBALTABLESPACE_OID {
            let pg_version_str = match self.timeline.pg_version {
                14 | 15 => self.timeline.pg_version.to_string(),
                ver => format!("{ver}\x0A"),
            };
            let header = new_tar_header("PG_VERSION", pg_version_str.len() as u64)?;
            self.ar.append(&header, pg_version_str.as_bytes()).await?;

            info!("timeline.pg_version {}", self.timeline.pg_version);

            if let Some(img) = relmap_img {
                // filenode map for global tablespace
                let header = new_tar_header("global/pg_filenode.map", img.len() as u64)?;
                self.ar.append(&header, &img[..]).await?;
            } else {
                warn!("global/pg_filenode.map is missing");
            }
        } else {
            // User defined tablespaces are not supported. However, as
            // a special case, if a tablespace/db directory is
            // completely empty, we can leave it out altogether. This
            // makes taking a base backup after the 'tablespace'
            // regression test pass, because the test drops the
            // created tablespaces after the tests.
            //
            // FIXME: this wouldn't be necessary, if we handled
            // XLOG_TBLSPC_DROP records. But we probably should just
            // throw an error on CREATE TABLESPACE in the first place.
            if !has_relmap_file
                && self
                    .timeline
                    .list_rels(spcnode, dbnode, Version::Lsn(self.lsn), self.ctx)
                    .await?
                    .is_empty()
            {
                return Ok(());
            }
            // User defined tablespaces are not supported
            ensure!(spcnode == DEFAULTTABLESPACE_OID);

            // Append dir path for each database
            let path = format!("base/{}", dbnode);
            let header = new_tar_header_dir(&path)?;
            self.ar.append(&header, &mut io::empty()).await?;

            if let Some(img) = relmap_img {
                let dst_path = format!("base/{}/PG_VERSION", dbnode);

                let pg_version_str = match self.timeline.pg_version {
                    14 | 15 => self.timeline.pg_version.to_string(),
                    ver => format!("{ver}\x0A"),
                };
                let header = new_tar_header(&dst_path, pg_version_str.len() as u64)?;
                self.ar.append(&header, pg_version_str.as_bytes()).await?;

                let relmap_path = format!("base/{}/pg_filenode.map", dbnode);
                let header = new_tar_header(&relmap_path, img.len() as u64)?;
                self.ar.append(&header, &img[..]).await?;
            }
        };
        Ok(())
    }

    //
    // Extract twophase state files
    //
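    // Each file is named pg_twophase/<xid in hex> and contains the state
    // data followed by a CRC32C checksum, matching the on-disk format that
    // Postgres expects to find there.
    //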
    async fn add_twophase_file(&mut self, xid: TransactionId) -> anyhow::Result<()> {
        let img = self
            .timeline
            .get_twophase_file(xid, self.lsn, self.ctx)
            .await?;

        let mut buf = BytesMut::new();
        buf.extend_from_slice(&img[..]);
        let crc = crc32c::crc32c(&img[..]);
        buf.put_u32_le(crc);
        let path = format!("pg_twophase/{:>08X}", xid);
        let header = new_tar_header(&path, buf.len() as u64)?;
        self.ar.append(&header, &buf[..]).await?;

        Ok(())
    }

    //
    // Add generated pg_control file and bootstrap WAL segment.
    // Also send zenith.signal file with extra bootstrap data.
    //
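    // zenith.signal is a one-line text file that carries the prev_record_lsn
    // to the compute node; it reads "PREV LSN: none", "PREV LSN: invalid", or
    // "PREV LSN: <lsn>", matching the three cases handled below.
    //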
    async fn add_pgcontrol_file(&mut self) -> anyhow::Result<()> {
        // add zenith.signal file
        let mut zenith_signal = String::new();
        if self.prev_record_lsn == Lsn(0) {
            if self.lsn == self.timeline.get_ancestor_lsn() {
                write!(zenith_signal, "PREV LSN: none")?;
            } else {
                write!(zenith_signal, "PREV LSN: invalid")?;
            }
        } else {
            write!(zenith_signal, "PREV LSN: {}", self.prev_record_lsn)?;
        }
        self.ar
            .append(
                &new_tar_header("zenith.signal", zenith_signal.len() as u64)?,
                zenith_signal.as_bytes(),
            )
            .await?;

        let checkpoint_bytes = self
            .timeline
            .get_checkpoint(self.lsn, self.ctx)
            .await
            .context("failed to get checkpoint bytes")?;
        let pg_control_bytes = self
            .timeline
            .get_control_file(self.lsn, self.ctx)
            .await
            .context("failed to get control file bytes")?;

        let (pg_control_bytes, system_identifier) = postgres_ffi::generate_pg_control(
            &pg_control_bytes,
            &checkpoint_bytes,
            self.lsn,
            self.timeline.pg_version,
        )?;

        // send pg_control
        let header = new_tar_header("global/pg_control", pg_control_bytes.len() as u64)?;
        self.ar.append(&header, &pg_control_bytes[..]).await?;

        // send WAL segment
        let segno = self.lsn.segment_number(WAL_SEGMENT_SIZE);
        let wal_file_name = XLogFileName(PG_TLI, segno, WAL_SEGMENT_SIZE);
        let wal_file_path = format!("pg_wal/{}", wal_file_name);
        let header = new_tar_header(&wal_file_path, WAL_SEGMENT_SIZE as u64)?;

        let wal_seg = postgres_ffi::generate_wal_segment(
            segno,
            system_identifier,
            self.timeline.pg_version,
            self.lsn,
        )
        .map_err(|e| anyhow!(e).context("Failed generating wal segment"))?;
        ensure!(wal_seg.len() == WAL_SEGMENT_SIZE);
        self.ar.append(&header, &wal_seg[..]).await?;
        Ok(())
    }
}

//
// Create a new tarball entry header
//
fn new_tar_header(path: &str, size: u64) -> anyhow::Result<Header> {
    let mut header = Header::new_gnu();
    header.set_size(size);
    header.set_path(path)?;
    header.set_mode(0o600); // -rw-------
    header.set_mtime(
        // use current time as last modified time
        SystemTime::now()
            .duration_since(SystemTime::UNIX_EPOCH)
            .unwrap()
            .as_secs(),
    );
    header.set_cksum();
    Ok(header)
}
596 :
597 15217 : fn new_tar_header_dir(path: &str) -> anyhow::Result<Header> {
598 15217 : let mut header = Header::new_gnu();
599 15217 : header.set_size(0);
600 15217 : header.set_path(path)?;
601 15217 : header.set_mode(0o755); // -rw-------
602 15217 : header.set_entry_type(EntryType::dir());
603 15217 : header.set_mtime(
604 15217 : // use currenttime as last modified time
605 15217 : SystemTime::now()
606 15217 : .duration_since(SystemTime::UNIX_EPOCH)
607 15217 : .unwrap()
608 15217 : .as_secs(),
609 15217 : );
610 15217 : header.set_cksum();
611 15217 : Ok(header)
612 15217 : }