//!
//! Generate a tarball with files needed to bootstrap ComputeNode.
//!
//! TODO: this module has nothing to do with PostgreSQL pg_basebackup.
//! It could use a better name.
//!
//! A stateless Postgres compute node is launched by sending it a tarball
//! that contains non-relational data (multixacts, clog, filenodemaps, twophase files),
//! a generated pg_control file, and a dummy segment of WAL.
//! This module is responsible for creating such a tarball
//! from the data stored in object storage.
//!
use anyhow::{anyhow, bail, ensure, Context};
use bytes::{BufMut, Bytes, BytesMut};
use fail::fail_point;
use pageserver_api::key::{key_to_slru_block, Key};
use postgres_ffi::pg_constants;
use std::fmt::Write as FmtWrite;
use std::time::SystemTime;
use tokio::io;
use tokio::io::AsyncWrite;
use tracing::*;

use tokio_tar::{Builder, EntryType, Header};

use crate::context::RequestContext;
use crate::pgdatadir_mapping::Version;
use crate::tenant::Timeline;
use pageserver_api::reltag::{RelTag, SlruKind};

use postgres_ffi::dispatch_pgversion;
use postgres_ffi::pg_constants::{DEFAULTTABLESPACE_OID, GLOBALTABLESPACE_OID};
use postgres_ffi::pg_constants::{PGDATA_SPECIAL_FILES, PGDATA_SUBDIRS, PG_HBA};
use postgres_ffi::relfile_utils::{INIT_FORKNUM, MAIN_FORKNUM};
use postgres_ffi::TransactionId;
use postgres_ffi::XLogFileName;
use postgres_ffi::PG_TLI;
use postgres_ffi::{BLCKSZ, RELSEG_SIZE, WAL_SEGMENT_SIZE};
use utils::lsn::Lsn;

/// Create a basebackup with non-rel data in it.
/// Relational data is only included if 'full_backup' is true.
///
/// Currently we pass a 'req_lsn' of None in two cases:
/// * During the basebackup right after timeline creation
/// * When working without safekeepers. In this situation it is important to match the LSN
///   we are taking the basebackup at with the LSN that is used in the pageserver's walreceiver
///   to start replication.
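///
/// A minimal usage sketch (hypothetical caller; `writer`, `timeline`, and
/// `ctx` are assumptions, not defined in this module):
///
/// ```ignore
/// // Back up the end of the timeline, deriving prev_lsn automatically.
/// send_basebackup_tarball(&mut writer, &timeline, None, None, false, &ctx).await?;
/// ```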
pub async fn send_basebackup_tarball<'a, W>(
    write: &'a mut W,
    timeline: &'a Timeline,
    req_lsn: Option<Lsn>,
    prev_lsn: Option<Lsn>,
    full_backup: bool,
    ctx: &'a RequestContext,
) -> anyhow::Result<()>
where
    W: AsyncWrite + Send + Sync + Unpin,
{
    // The compute node's Postgres doesn't have any previous WAL files, but the
    // first record that it's going to write needs to include the LSN of the
    // previous record (xl_prev). We include prev_record_lsn in the
    // "zenith.signal" file, so that postgres can read it during startup.
    //
    // We don't keep full history of record boundaries in the page server,
    // however; we only keep the predecessor of the latest record on each
    // timeline. So we can only provide prev_record_lsn when you take a
    // base backup at the end of the timeline, i.e. at last_record_lsn.
    // Even at the end of the timeline, we sometimes don't have a valid
    // prev_lsn value; that happens if the timeline was just branched from
    // an old LSN and it doesn't have any WAL of its own yet. We will set
    // prev_lsn to Lsn(0) if we cannot provide the correct value.
    let (backup_prev, backup_lsn) = if let Some(req_lsn) = req_lsn {
        // Backup was requested at a particular LSN. The caller should've
        // already checked that it's a valid LSN.

        // If the requested point is the end of the timeline, we can
        // provide prev_lsn. (get_last_record_rlsn() might return it as
        // zero, though, if no WAL has been generated on this timeline
        // yet.)
        let end_of_timeline = timeline.get_last_record_rlsn();
        if req_lsn == end_of_timeline.last {
            (end_of_timeline.prev, req_lsn)
        } else {
            (Lsn(0), req_lsn)
        }
    } else {
        // Backup was requested at the end of the timeline.
        let end_of_timeline = timeline.get_last_record_rlsn();
        (end_of_timeline.prev, end_of_timeline.last)
    };

    // Consolidate the derived and the provided prev_lsn values
    let prev_lsn = if let Some(provided_prev_lsn) = prev_lsn {
        if backup_prev != Lsn(0) {
            ensure!(backup_prev == provided_prev_lsn);
        }
        provided_prev_lsn
    } else {
        backup_prev
    };

    info!(
        "taking basebackup lsn={}, prev_lsn={} (full_backup={})",
        backup_lsn, prev_lsn, full_backup
    );

    let basebackup = Basebackup {
        ar: Builder::new_non_terminated(write),
        timeline,
        lsn: backup_lsn,
        prev_record_lsn: prev_lsn,
        full_backup,
        ctx,
    };
    basebackup
        .send_tarball()
        .instrument(info_span!("send_tarball", backup_lsn=%backup_lsn))
        .await
}

/// This is a short-lived object, created for the duration of tarball creation,
/// mostly to avoid passing a lot of parameters between the various functions
/// used to construct the tarball.
struct Basebackup<'a, W>
where
    W: AsyncWrite + Send + Sync + Unpin,
{
    ar: Builder<&'a mut W>,
    timeline: &'a Timeline,
    lsn: Lsn,
    prev_record_lsn: Lsn,
    full_backup: bool,
    ctx: &'a RequestContext,
}

/// A sink that accepts SLRU blocks ordered by key and forwards
/// full segments to the archive.
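///
/// A hypothetical feeding loop (a sketch mirroring the use in `send_tarball`;
/// the `blocks` iterator is an assumption):
///
/// ```ignore
/// let mut builder = SlruSegmentsBuilder::new(&mut ar);
/// for (key, block) in blocks {
///     builder.add_block(&key, block?).await?;
/// }
/// builder.finish().await?;
/// ```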
struct SlruSegmentsBuilder<'a, 'b, W>
where
    W: AsyncWrite + Send + Sync + Unpin,
{
    ar: &'a mut Builder<&'b mut W>,
    buf: Vec<u8>,
    current_segment: Option<(SlruKind, u32)>,
}

impl<'a, 'b, W> SlruSegmentsBuilder<'a, 'b, W>
where
    W: AsyncWrite + Send + Sync + Unpin,
{
    fn new(ar: &'a mut Builder<&'b mut W>) -> Self {
        Self {
            ar,
            buf: Vec::new(),
            current_segment: None,
        }
    }

    async fn add_block(&mut self, key: &Key, block: Bytes) -> anyhow::Result<()> {
        let (kind, segno, _) = key_to_slru_block(*key)?;

        match kind {
            SlruKind::Clog => {
                ensure!(block.len() == BLCKSZ as usize || block.len() == BLCKSZ as usize + 8);
            }
            SlruKind::MultiXactMembers | SlruKind::MultiXactOffsets => {
                ensure!(block.len() == BLCKSZ as usize);
            }
        }

        let segment = (kind, segno);
        match self.current_segment {
            None => {
                self.current_segment = Some(segment);
                self.buf
                    .extend_from_slice(block.slice(..BLCKSZ as usize).as_ref());
            }
            Some(current_seg) if current_seg == segment => {
                self.buf
                    .extend_from_slice(block.slice(..BLCKSZ as usize).as_ref());
            }
            Some(_) => {
                self.flush().await?;

                self.current_segment = Some(segment);
                self.buf
                    .extend_from_slice(block.slice(..BLCKSZ as usize).as_ref());
            }
        }

        Ok(())
    }

    async fn flush(&mut self) -> anyhow::Result<()> {
        let nblocks = self.buf.len() / BLCKSZ as usize;
        let (kind, segno) = self.current_segment.take().unwrap();
        let segname = format!("{}/{:>04X}", kind.to_str(), segno);
        let header = new_tar_header(&segname, self.buf.len() as u64)?;
        self.ar.append(&header, self.buf.as_slice()).await?;

        trace!("Added to basebackup slru {} relsize {}", segname, nblocks);

        self.buf.clear();

        Ok(())
    }

    async fn finish(mut self) -> anyhow::Result<()> {
        if self.current_segment.is_none() || self.buf.is_empty() {
            return Ok(());
        }

        self.flush().await
    }
}

impl<'a, W> Basebackup<'a, W>
where
    W: AsyncWrite + Send + Sync + Unpin,
{
    async fn send_tarball(mut self) -> anyhow::Result<()> {
        // TODO include checksum

        let lazy_slru_download = self.timeline.get_lazy_slru_download() && !self.full_backup;
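        // When lazy SLRU download is enabled, the SLRU segments are omitted
        // from the tarball here; the assumption is that the compute fetches
        // them on demand instead.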

        // Create pgdata subdirs structure
        for dir in PGDATA_SUBDIRS.iter() {
            let header = new_tar_header_dir(dir)?;
            self.ar
                .append(&header, &mut io::empty())
                .await
                .context("could not add directory to basebackup tarball")?;
        }

        // Send config files.
        for filepath in PGDATA_SPECIAL_FILES.iter() {
            if *filepath == "pg_hba.conf" {
                let data = PG_HBA.as_bytes();
                let header = new_tar_header(filepath, data.len() as u64)?;
                self.ar
                    .append(&header, data)
                    .await
                    .context("could not add config file to basebackup tarball")?;
            } else {
                let header = new_tar_header(filepath, 0)?;
                self.ar
                    .append(&header, &mut io::empty())
                    .await
                    .context("could not add config file to basebackup tarball")?;
            }
        }
        if !lazy_slru_download {
            // Gather non-relational files from object storage pages.
            let slru_partitions = self
                .timeline
                .get_slru_keyspace(Version::Lsn(self.lsn), self.ctx)
                .await?
                .partition(Timeline::MAX_GET_VECTORED_KEYS * BLCKSZ as u64);

            let mut slru_builder = SlruSegmentsBuilder::new(&mut self.ar);

            for part in slru_partitions.parts {
                let blocks = self.timeline.get_vectored(part, self.lsn, self.ctx).await?;

                for (key, block) in blocks {
                    slru_builder.add_block(&key, block?).await?;
                }
            }
            slru_builder.finish().await?;
        }

        let mut min_restart_lsn: Lsn = Lsn::MAX;
        // Create tablespace directories
        for ((spcnode, dbnode), has_relmap_file) in
            self.timeline.list_dbdirs(self.lsn, self.ctx).await?
        {
            self.add_dbdir(spcnode, dbnode, has_relmap_file).await?;

            // If full backup is requested, include all relation files.
            // Otherwise only include init forks of unlogged relations.
            let rels = self
                .timeline
                .list_rels(spcnode, dbnode, Version::Lsn(self.lsn), self.ctx)
                .await?;
            for &rel in rels.iter() {
                // Send the init fork as the main fork to provide well-formed
                // empty contents for UNLOGGED relations. Postgres copies it in
                // `reinit.c` during recovery.
                if rel.forknum == INIT_FORKNUM {
                    // We may not need the _init fork itself, but having it at
                    // least serves as a marker that the relation is unlogged.
                    self.add_rel(rel, rel).await?;
                    self.add_rel(rel, rel.with_forknum(MAIN_FORKNUM)).await?;
                    continue;
                }

                if self.full_backup {
                    if rel.forknum == MAIN_FORKNUM && rels.contains(&rel.with_forknum(INIT_FORKNUM))
                    {
                        // skip this, will include it when we reach the init fork
                        continue;
                    }
                    self.add_rel(rel, rel).await?;
                }
            }

            for (path, content) in self.timeline.list_aux_files(self.lsn, self.ctx).await? {
                if path.starts_with("pg_replslot") {
                    let offs = pg_constants::REPL_SLOT_ON_DISK_OFFSETOF_RESTART_LSN;
                    let restart_lsn = Lsn(u64::from_le_bytes(
                        content[offs..offs + 8].try_into().unwrap(),
                    ));
                    info!("Replication slot {} restart LSN={}", path, restart_lsn);
                    min_restart_lsn = Lsn::min(min_restart_lsn, restart_lsn);
                }
                let header = new_tar_header(&path, content.len() as u64)?;
                self.ar
                    .append(&header, &*content)
                    .await
                    .context("could not add aux file to basebackup tarball")?;
            }
        }
        if min_restart_lsn != Lsn::MAX {
            info!(
                "Min restart LSN for logical replication is {}",
                min_restart_lsn
            );
            let data = min_restart_lsn.0.to_le_bytes();
            let header = new_tar_header("restart.lsn", data.len() as u64)?;
            self.ar
                .append(&header, &data[..])
                .await
                .context("could not add restart.lsn file to basebackup tarball")?;
        }
        for xid in self
            .timeline
            .list_twophase_files(self.lsn, self.ctx)
            .await?
        {
            self.add_twophase_file(xid).await?;
        }

        fail_point!("basebackup-before-control-file", |_| {
            bail!("failpoint basebackup-before-control-file")
        });

        // Generate pg_control and bootstrap WAL segment.
        self.add_pgcontrol_file().await?;
        self.ar.finish().await?;
        debug!("all tarred up!");
        Ok(())
    }

    /// Add contents of relfilenode `src`, naming it as `dst`.
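    ///
    /// Relations larger than `RELSEG_SIZE` blocks are split into segment
    /// files, following the usual Postgres convention: the first segment is
    /// the bare relfilenode file name and later segments carry a `.1`, `.2`,
    /// ... suffix (assuming `RelTag::to_segfile_name` encodes the segment
    /// number that way).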
    async fn add_rel(&mut self, src: RelTag, dst: RelTag) -> anyhow::Result<()> {
        let nblocks = self
            .timeline
            .get_rel_size(src, Version::Lsn(self.lsn), false, self.ctx)
            .await?;

        // If the relation is empty, create an empty file
        if nblocks == 0 {
            let file_name = dst.to_segfile_name(0);
            let header = new_tar_header(&file_name, 0)?;
            self.ar.append(&header, &mut io::empty()).await?;
            return Ok(());
        }

        // Add a file for each chunk of blocks (aka segment)
        let mut startblk = 0;
        let mut seg = 0;
        while startblk < nblocks {
            let endblk = std::cmp::min(startblk + RELSEG_SIZE, nblocks);

            let mut segment_data: Vec<u8> = vec![];
            for blknum in startblk..endblk {
                let img = self
                    .timeline
                    .get_rel_page_at_lsn(src, blknum, Version::Lsn(self.lsn), false, self.ctx)
                    .await?;
                segment_data.extend_from_slice(&img[..]);
            }

            let file_name = dst.to_segfile_name(seg as u32);
            let header = new_tar_header(&file_name, segment_data.len() as u64)?;
            self.ar.append(&header, segment_data.as_slice()).await?;

            seg += 1;
            startblk = endblk;
        }

        Ok(())
    }

    //
    // Include database/tablespace directories.
    //
    // Each directory contains a PG_VERSION file, and the default database
    // directories also contain pg_filenode.map files.
    //
    async fn add_dbdir(
        &mut self,
        spcnode: u32,
        dbnode: u32,
        has_relmap_file: bool,
    ) -> anyhow::Result<()> {
        let relmap_img = if has_relmap_file {
            let img = self
                .timeline
                .get_relmap_file(spcnode, dbnode, Version::Lsn(self.lsn), self.ctx)
                .await?;

            ensure!(
                img.len()
                    == dispatch_pgversion!(
                        self.timeline.pg_version,
                        pgv::bindings::SIZEOF_RELMAPFILE
                    )
            );

            Some(img)
        } else {
            None
        };

        if spcnode == GLOBALTABLESPACE_OID {
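            // Note: the PG_VERSION contents written below include a trailing
            // newline from v16 onward, while v14 and v15 are written without
            // one (presumably matching what those compute images expect).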
            let pg_version_str = match self.timeline.pg_version {
                14 | 15 => self.timeline.pg_version.to_string(),
                ver => format!("{ver}\x0A"),
            };
            let header = new_tar_header("PG_VERSION", pg_version_str.len() as u64)?;
            self.ar.append(&header, pg_version_str.as_bytes()).await?;

            info!("timeline.pg_version {}", self.timeline.pg_version);

            if let Some(img) = relmap_img {
                // filenode map for global tablespace
                let header = new_tar_header("global/pg_filenode.map", img.len() as u64)?;
                self.ar.append(&header, &img[..]).await?;
            } else {
                warn!("global/pg_filenode.map is missing");
            }
        } else {
            // User-defined tablespaces are not supported. However, as
            // a special case, if a tablespace/db directory is
            // completely empty, we can leave it out altogether. This
            // makes it possible to take a base backup after running the
            // 'tablespace' regression test, because that test drops the
            // created tablespaces at the end.
            //
            // FIXME: this wouldn't be necessary if we handled
            // XLOG_TBLSPC_DROP records. But we probably should just
            // throw an error on CREATE TABLESPACE in the first place.
            if !has_relmap_file
                && self
                    .timeline
                    .list_rels(spcnode, dbnode, Version::Lsn(self.lsn), self.ctx)
                    .await?
                    .is_empty()
            {
                return Ok(());
            }
            // User-defined tablespaces are not supported
            ensure!(spcnode == DEFAULTTABLESPACE_OID);

            // Append dir path for each database
            let path = format!("base/{}", dbnode);
            let header = new_tar_header_dir(&path)?;
            self.ar.append(&header, &mut io::empty()).await?;

            if let Some(img) = relmap_img {
                let dst_path = format!("base/{}/PG_VERSION", dbnode);

                let pg_version_str = match self.timeline.pg_version {
                    14 | 15 => self.timeline.pg_version.to_string(),
                    ver => format!("{ver}\x0A"),
                };
                let header = new_tar_header(&dst_path, pg_version_str.len() as u64)?;
                self.ar.append(&header, pg_version_str.as_bytes()).await?;

                let relmap_path = format!("base/{}/pg_filenode.map", dbnode);
                let header = new_tar_header(&relmap_path, img.len() as u64)?;
                self.ar.append(&header, &img[..]).await?;
            }
        };
        Ok(())
    }

    //
    // Extract twophase state files
    //
    async fn add_twophase_file(&mut self, xid: TransactionId) -> anyhow::Result<()> {
        let img = self
            .timeline
            .get_twophase_file(xid, self.lsn, self.ctx)
            .await?;

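        // Two-phase state files carry a CRC32C checksum of their contents at
        // the end, which Postgres verifies when reading the file back, so
        // append one after the payload.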
        let mut buf = BytesMut::new();
        buf.extend_from_slice(&img[..]);
        let crc = crc32c::crc32c(&img[..]);
        buf.put_u32_le(crc);
        let path = format!("pg_twophase/{:>08X}", xid);
        let header = new_tar_header(&path, buf.len() as u64)?;
        self.ar.append(&header, &buf[..]).await?;

        Ok(())
    }

    //
    // Add the generated pg_control file and bootstrap WAL segment.
    // Also send the zenith.signal file with extra bootstrap data.
    //
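    // The zenith.signal payload is a single text line, e.g. "PREV LSN: 0/16B5A50",
    // or "PREV LSN: none" / "PREV LSN: invalid" when no usable value is known.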
    async fn add_pgcontrol_file(&mut self) -> anyhow::Result<()> {
        // Add the zenith.signal file
        let mut zenith_signal = String::new();
        if self.prev_record_lsn == Lsn(0) {
            if self.lsn == self.timeline.get_ancestor_lsn() {
                write!(zenith_signal, "PREV LSN: none")?;
            } else {
                write!(zenith_signal, "PREV LSN: invalid")?;
            }
        } else {
            write!(zenith_signal, "PREV LSN: {}", self.prev_record_lsn)?;
        }
        self.ar
            .append(
                &new_tar_header("zenith.signal", zenith_signal.len() as u64)?,
                zenith_signal.as_bytes(),
            )
            .await?;

        let checkpoint_bytes = self
            .timeline
            .get_checkpoint(self.lsn, self.ctx)
            .await
            .context("failed to get checkpoint bytes")?;
        let pg_control_bytes = self
            .timeline
            .get_control_file(self.lsn, self.ctx)
            .await
            .context("failed to get control bytes")?;

        let (pg_control_bytes, system_identifier) = postgres_ffi::generate_pg_control(
            &pg_control_bytes,
            &checkpoint_bytes,
            self.lsn,
            self.timeline.pg_version,
        )?;

        // Send pg_control.
        let header = new_tar_header("global/pg_control", pg_control_bytes.len() as u64)?;
        self.ar.append(&header, &pg_control_bytes[..]).await?;

        // Send the WAL segment.
        let segno = self.lsn.segment_number(WAL_SEGMENT_SIZE);
        let wal_file_name = XLogFileName(PG_TLI, segno, WAL_SEGMENT_SIZE);
        let wal_file_path = format!("pg_wal/{}", wal_file_name);
        let header = new_tar_header(&wal_file_path, WAL_SEGMENT_SIZE as u64)?;

        let wal_seg = postgres_ffi::generate_wal_segment(
            segno,
            system_identifier,
            self.timeline.pg_version,
            self.lsn,
        )
        .map_err(|e| anyhow!(e).context("Failed generating wal segment"))?;
        ensure!(wal_seg.len() == WAL_SEGMENT_SIZE);
        self.ar.append(&header, &wal_seg[..]).await?;
        Ok(())
    }
}

//
// Create a new tarball entry header
//
fn new_tar_header(path: &str, size: u64) -> anyhow::Result<Header> {
    let mut header = Header::new_gnu();
    header.set_size(size);
    header.set_path(path)?;
    header.set_mode(0o600); // -rw-------
    header.set_mtime(
        // use the current time as the last-modified time
        SystemTime::now()
            .duration_since(SystemTime::UNIX_EPOCH)
            .unwrap()
            .as_secs(),
    );
    header.set_cksum();
    Ok(header)
}

fn new_tar_header_dir(path: &str) -> anyhow::Result<Header> {
    let mut header = Header::new_gnu();
    header.set_size(0);
    header.set_path(path)?;
    header.set_mode(0o755); // drwxr-xr-x
    header.set_entry_type(EntryType::dir());
    header.set_mtime(
        // use the current time as the last-modified time
        SystemTime::now()
            .duration_since(SystemTime::UNIX_EPOCH)
            .unwrap()
            .as_secs(),
    );
    header.set_cksum();
    Ok(header)
}