Line data Source code
1 : //!
2 : //! Generate a tarball with files needed to bootstrap ComputeNode.
3 : //!
4 : //! TODO: this module has nothing to do with PostgreSQL pg_basebackup.
5 : //! It could use a better name.
6 : //!
7 : //! A stateless Postgres compute node is launched by sending it a tarball
8 : //! that contains non-relational data (multixacts, clog, filenode maps, twophase files),
9 : //! a generated pg_control file, and a dummy WAL segment.
10 : //! This module is responsible for creating such a tarball
11 : //! from data stored in object storage.
12 : //!
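//! For orientation, a produced tarball unpacks into a minimal PGDATA along
//! these lines (a sketch only; exact SLRU directory names depend on the
//! Postgres version, and the dbnode path is an example):
//!
//! ```text
//! global/pg_control           -- generated control file
//! global/pg_filenode.map
//! base/<dbnode>/PG_VERSION
//! base/<dbnode>/pg_filenode.map
//! pg_xact/0000                -- SLRU segments (clog, multixact, ...)
//! pg_wal/<segment>            -- one generated WAL segment
//! zenith.signal               -- prev-record LSN hint for compute startup
//! ```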
13 : use anyhow::{anyhow, bail, ensure, Context};
14 : use bytes::{BufMut, Bytes, BytesMut};
15 : use fail::fail_point;
16 : use pageserver_api::key::{key_to_slru_block, Key};
17 : use postgres_ffi::pg_constants;
18 : use std::fmt::Write as FmtWrite;
19 : use std::time::SystemTime;
20 : use tokio::io;
21 : use tokio::io::AsyncWrite;
22 : use tracing::*;
23 :
24 : use tokio_tar::{Builder, EntryType, Header};
25 :
26 : use crate::context::RequestContext;
27 : use crate::pgdatadir_mapping::Version;
28 : use crate::tenant::Timeline;
29 : use pageserver_api::reltag::{RelTag, SlruKind};
30 :
31 : use postgres_ffi::dispatch_pgversion;
32 : use postgres_ffi::pg_constants::{DEFAULTTABLESPACE_OID, GLOBALTABLESPACE_OID};
33 : use postgres_ffi::pg_constants::{PGDATA_SPECIAL_FILES, PGDATA_SUBDIRS, PG_HBA};
34 : use postgres_ffi::relfile_utils::{INIT_FORKNUM, MAIN_FORKNUM};
35 : use postgres_ffi::TransactionId;
36 : use postgres_ffi::XLogFileName;
37 : use postgres_ffi::PG_TLI;
38 : use postgres_ffi::{BLCKSZ, RELSEG_SIZE, WAL_SEGMENT_SIZE};
39 : use utils::lsn::Lsn;
40 :
41 : /// Create a basebackup tarball with non-relational data in it.
42 : /// Relational data is included only if 'full_backup' is true.
43 : ///
44 : /// Currently we pass 'req_lsn' as None in two cases:
45 : /// * During the basebackup taken right after timeline creation
46 : /// * When working without safekeepers. In this situation it is important that the LSN
47 : /// we take the basebackup at matches the LSN used by the pageserver's walreceiver
48 : /// to start replication.
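///
/// A usage sketch (hypothetical call site; `socket`, `timeline`, and `ctx`
/// are assumed to be in hand):
///
/// ```ignore
/// let mut out = tokio::io::BufWriter::new(socket);
/// // Stream a backup at the end of the timeline, non-relational data only.
/// send_basebackup_tarball(&mut out, &timeline, None, None, false, &ctx).await?;
/// ```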
49 607 : pub async fn send_basebackup_tarball<'a, W>(
50 607 : write: &'a mut W,
51 607 : timeline: &'a Timeline,
52 607 : req_lsn: Option<Lsn>,
53 607 : prev_lsn: Option<Lsn>,
54 607 : full_backup: bool,
55 607 : ctx: &'a RequestContext,
56 607 : ) -> anyhow::Result<()>
57 607 : where
58 607 : W: AsyncWrite + Send + Sync + Unpin,
59 607 : {
60 : // Compute postgres doesn't have any previous WAL files, but the first
61 : // record that it's going to write needs to include the LSN of the
62 : // previous record (xl_prev). We include prev_record_lsn in the
63 : // "zenith.signal" file, so that postgres can read it during startup.
64 : //
65 : // We don't keep full history of record boundaries in the page server,
66 : // however, only the predecessor of the latest record on each
67 : // timeline. So we can only provide prev_record_lsn when you take a
68 : // base backup at the end of the timeline, i.e. at last_record_lsn.
69 : // Even at the end of the timeline, we sometimes don't have a valid
70 : // prev_lsn value; that happens if the timeline was just branched from
71 : // an old LSN and it doesn't have any WAL of its own yet. We will set
72 : // prev_lsn to Lsn(0) if we cannot provide the correct value.
73 607 : let (backup_prev, backup_lsn) = if let Some(req_lsn) = req_lsn {
74 : // Backup was requested at a particular LSN. The caller should've
75 : // already checked that it's a valid LSN.
76 :
77 : // If the requested point is the end of the timeline, we can
78 : // provide prev_lsn. (get_last_record_rlsn() might return it as
79 : // zero, though, if no WAL has been generated on this timeline
80 : // yet.)
81 198 : let end_of_timeline = timeline.get_last_record_rlsn();
82 198 : if req_lsn == end_of_timeline.last {
83 158 : (end_of_timeline.prev, req_lsn)
84 : } else {
85 40 : (Lsn(0), req_lsn)
86 : }
87 : } else {
88 : // Backup was requested at end of the timeline.
89 409 : let end_of_timeline = timeline.get_last_record_rlsn();
90 409 : (end_of_timeline.prev, end_of_timeline.last)
91 : };
92 :
93 : // Consolidate the derived and the provided prev_lsn values
94 607 : let prev_lsn = if let Some(provided_prev_lsn) = prev_lsn {
95 12 : if backup_prev != Lsn(0) {
96 10 : ensure!(backup_prev == provided_prev_lsn);
97 2 : }
98 12 : provided_prev_lsn
99 : } else {
100 595 : backup_prev
101 : };
102 :
103 607 : info!(
104 607 : "taking basebackup lsn={}, prev_lsn={} (full_backup={})",
105 607 : backup_lsn, prev_lsn, full_backup
106 607 : );
107 :
108 607 : let basebackup = Basebackup {
109 607 : ar: Builder::new_non_terminated(write),
110 607 : timeline,
111 607 : lsn: backup_lsn,
112 607 : prev_record_lsn: prev_lsn,
113 607 : full_backup,
114 607 : ctx,
115 607 : };
116 607 : basebackup
117 607 : .send_tarball()
118 607 : .instrument(info_span!("send_tarball", backup_lsn=%backup_lsn))
119 54259 : .await
120 607 : }
121 :
122 : /// A short-lived object that exists only for the duration of tarball creation.
123 : /// It exists mostly to avoid passing a lot of parameters between the various
124 : /// functions used to construct the tarball.
125 : struct Basebackup<'a, W>
126 : where
127 : W: AsyncWrite + Send + Sync + Unpin,
128 : {
129 : ar: Builder<&'a mut W>,
130 : timeline: &'a Timeline,
131 : lsn: Lsn,
132 : prev_record_lsn: Lsn,
133 : full_backup: bool,
134 : ctx: &'a RequestContext,
135 : }
136 :
137 : /// A sink that accepts SLRU blocks ordered by key and forwards
138 : /// full segments to the archive.
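///
/// A usage sketch mirroring `send_tarball` below (`ar` and
/// `blocks_in_key_order` are illustrative names):
///
/// ```ignore
/// let mut slru_builder = SlruSegmentsBuilder::new(&mut ar);
/// for (key, block) in blocks_in_key_order {
///     slru_builder.add_block(&key, block?).await?;
/// }
/// // Flush the last, possibly partial, segment.
/// slru_builder.finish().await?;
/// ```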
139 : struct SlruSegmentsBuilder<'a, 'b, W>
140 : where
141 : W: AsyncWrite + Send + Sync + Unpin,
142 : {
143 : ar: &'a mut Builder<&'b mut W>,
144 : buf: Vec<u8>,
145 : current_segment: Option<(SlruKind, u32)>,
146 : }
147 :
148 : impl<'a, 'b, W> SlruSegmentsBuilder<'a, 'b, W>
149 : where
150 : W: AsyncWrite + Send + Sync + Unpin,
151 : {
152 601 : fn new(ar: &'a mut Builder<&'b mut W>) -> Self {
153 601 : Self {
154 601 : ar,
155 601 : buf: Vec::new(),
156 601 : current_segment: None,
157 601 : }
158 601 : }
159 :
160 3034 : async fn add_block(&mut self, key: &Key, block: Bytes) -> anyhow::Result<()> {
161 3034 : let (kind, segno, _) = key_to_slru_block(*key)?;
162 :
163 3034 : match kind {
164 : SlruKind::Clog => {
165 1234 : ensure!(block.len() == BLCKSZ as usize || block.len() == BLCKSZ as usize + 8);
166 : }
167 : SlruKind::MultiXactMembers | SlruKind::MultiXactOffsets => {
168 1800 : ensure!(block.len() == BLCKSZ as usize);
169 : }
170 : }
171 :
172 3034 : let segment = (kind, segno);
173 2433 : match self.current_segment {
174 601 : None => {
175 601 : self.current_segment = Some(segment);
176 601 : self.buf
177 601 : .extend_from_slice(block.slice(..BLCKSZ as usize).as_ref());
178 601 : }
179 2433 : Some(current_seg) if current_seg == segment => {
180 1195 : self.buf
181 1195 : .extend_from_slice(block.slice(..BLCKSZ as usize).as_ref());
182 1195 : }
183 : Some(_) => {
184 1238 : self.flush().await?;
185 :
186 1238 : self.current_segment = Some(segment);
187 1238 : self.buf
188 1238 : .extend_from_slice(block.slice(..BLCKSZ as usize).as_ref());
189 : }
190 : }
191 :
192 3034 : Ok(())
193 3034 : }
194 :
195 1839 : async fn flush(&mut self) -> anyhow::Result<()> {
196 1839 : let nblocks = self.buf.len() / BLCKSZ as usize;
197 1839 : let (kind, segno) = self.current_segment.take().unwrap();
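// Segment file name, e.g. "pg_xact/0000"; the directory comes from the SLRU kind.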
198 1839 : let segname = format!("{}/{:>04X}", kind.to_str(), segno);
199 1839 : let header = new_tar_header(&segname, self.buf.len() as u64)?;
200 1839 : self.ar.append(&header, self.buf.as_slice()).await?;
201 :
202 0 : trace!("Added to basebackup slru {} relsize {}", segname, nblocks);
203 :
204 1839 : self.buf.clear();
205 1839 :
206 1839 : Ok(())
207 1839 : }
208 :
209 601 : async fn finish(mut self) -> anyhow::Result<()> {
210 601 : if self.current_segment.is_none() || self.buf.is_empty() {
211 0 : return Ok(());
212 601 : }
213 601 :
214 601 : self.flush().await
215 601 : }
216 : }
217 :
218 : impl<'a, W> Basebackup<'a, W>
219 : where
220 : W: AsyncWrite + Send + Sync + Unpin,
221 : {
222 607 : async fn send_tarball(mut self) -> anyhow::Result<()> {
223 : // TODO include checksum
224 :
225 607 : let lazy_slru_download = self.timeline.get_lazy_slru_download() && !self.full_backup;
226 :
227 : // Create pgdata subdirs structure
228 13354 : for dir in PGDATA_SUBDIRS.iter() {
229 13354 : let header = new_tar_header_dir(dir)?;
230 13354 : self.ar
231 13354 : .append(&header, &mut io::empty())
232 0 : .await
233 13354 : .context("could not add directory to basebackup tarball")?;
234 : }
235 :
236 : // Send config files.
237 1821 : for filepath in PGDATA_SPECIAL_FILES.iter() {
238 1821 : if *filepath == "pg_hba.conf" {
239 607 : let data = PG_HBA.as_bytes();
240 607 : let header = new_tar_header(filepath, data.len() as u64)?;
241 607 : self.ar
242 607 : .append(&header, data)
243 0 : .await
244 607 : .context("could not add config file to basebackup tarball")?;
245 : } else {
246 1214 : let header = new_tar_header(filepath, 0)?;
247 1214 : self.ar
248 1214 : .append(&header, &mut io::empty())
249 0 : .await
250 1214 : .context("could not add config file to basebackup tarball")?;
251 : }
252 : }
253 607 : if !lazy_slru_download {
254 : // Gather non-relational files from object storage pages.
255 607 : let slru_partitions = self
256 607 : .timeline
257 607 : .get_slru_keyspace(Version::Lsn(self.lsn), self.ctx)
258 241 : .await?
259 601 : .partition(Timeline::MAX_GET_VECTORED_KEYS * BLCKSZ as u64);
260 601 :
261 601 : let mut slru_builder = SlruSegmentsBuilder::new(&mut self.ar);
262 :
263 1241 : for part in slru_partitions.parts {
264 640 : let blocks = self
265 640 : .timeline
266 640 : .get_vectored(&part.ranges, self.lsn, self.ctx)
267 37899 : .await?;
268 :
269 3674 : for (key, block) in blocks {
270 3034 : slru_builder.add_block(&key, block?).await?;
271 : }
272 : }
273 601 : slru_builder.finish().await?;
274 0 : }
275 :
276 601 : let mut min_restart_lsn: Lsn = Lsn::MAX;
277 : // Create tablespace directories
278 2416 : for ((spcnode, dbnode), has_relmap_file) in
279 601 : self.timeline.list_dbdirs(self.lsn, self.ctx).await?
280 : {
281 2416 : self.add_dbdir(spcnode, dbnode, has_relmap_file).await?;
282 :
283 : // If full backup is requested, include all relation files.
284 : // Otherwise only include init forks of unlogged relations.
285 2416 : let rels = self
286 2416 : .timeline
287 2416 : .list_rels(spcnode, dbnode, Version::Lsn(self.lsn), self.ctx)
288 85 : .await?;
289 567666 : for &rel in rels.iter() {
290 : // Send init fork as main fork to provide well-formed empty
291 : // contents of UNLOGGED relations. Postgres copies it in
292 : // `reinit.c` during recovery.
293 567666 : if rel.forknum == INIT_FORKNUM {
294 : // I doubt we need the _init fork itself, but having it at least
295 : // serves as a marker that the relation is unlogged.
296 2 : self.add_rel(rel, rel).await?;
297 2 : self.add_rel(rel, rel.with_forknum(MAIN_FORKNUM)).await?;
298 2 : continue;
299 567664 : }
300 567664 :
301 567664 : if self.full_backup {
302 14014 : if rel.forknum == MAIN_FORKNUM && rels.contains(&rel.with_forknum(INIT_FORKNUM))
303 : {
304 : // skip this, will include it when we reach the init fork
305 0 : continue;
306 14014 : }
307 14014 : self.add_rel(rel, rel).await?;
308 553650 : }
309 : }
310 :
311 2416 : for (path, content) in self.timeline.list_aux_files(self.lsn, self.ctx).await? {
312 688 : if path.starts_with("pg_replslot") {
313 16 : let offs = pg_constants::REPL_SLOT_ON_DISK_OFFSETOF_RESTART_LSN;
314 16 : let restart_lsn = Lsn(u64::from_le_bytes(
315 16 : content[offs..offs + 8].try_into().unwrap(),
316 16 : ));
317 16 : info!("Replication slot {} restart LSN={}", path, restart_lsn);
318 16 : min_restart_lsn = Lsn::min(min_restart_lsn, restart_lsn);
319 672 : }
320 688 : let header = new_tar_header(&path, content.len() as u64)?;
321 688 : self.ar
322 688 : .append(&header, &*content)
323 8 : .await
324 688 : .context("could not add aux file to basebackup tarball")?;
325 : }
326 : }
327 601 : if min_restart_lsn != Lsn::MAX {
328 4 : info!(
329 4 : "Min restart LSN for logical replication is {}",
330 4 : min_restart_lsn
331 4 : );
332 4 : let data = min_restart_lsn.0.to_le_bytes();
333 4 : let header = new_tar_header("restart.lsn", data.len() as u64)?;
334 4 : self.ar
335 4 : .append(&header, &data[..])
336 0 : .await
337 4 : .context("could not add restart.lsn file to basebackup tarball")?;
338 597 : }
339 601 : for xid in self
340 601 : .timeline
341 601 : .list_twophase_files(self.lsn, self.ctx)
342 120 : .await?
343 : {
344 2 : self.add_twophase_file(xid).await?;
345 : }
346 :
347 601 : fail_point!("basebackup-before-control-file", |_| {
348 6 : bail!("failpoint basebackup-before-control-file")
349 601 : });
350 :
351 : // Generate pg_control and bootstrap WAL segment.
352 7765 : self.add_pgcontrol_file().await?;
353 595 : self.ar.finish().await?;
354 0 : debug!("all tarred up!");
355 595 : Ok(())
356 607 : }
357 :
358 : /// Add contents of relfilenode `src`, naming it as `dst`.
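///
/// Relations larger than `RELSEG_SIZE` blocks are split into segment files
/// of `RELSEG_SIZE` blocks each, mirroring Postgres's on-disk layout, e.g.
/// (hypothetical relfilenode):
///
/// ```text
/// base/13008/16384      -- blocks 0 .. RELSEG_SIZE-1
/// base/13008/16384.1    -- blocks RELSEG_SIZE .. 2*RELSEG_SIZE-1
/// ```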
359 14018 : async fn add_rel(&mut self, src: RelTag, dst: RelTag) -> anyhow::Result<()> {
360 14018 : let nblocks = self
361 14018 : .timeline
362 14018 : .get_rel_size(src, Version::Lsn(self.lsn), false, self.ctx)
363 605 : .await?;
364 :
365 : // If the relation is empty, create an empty file
366 14018 : if nblocks == 0 {
367 2400 : let file_name = dst.to_segfile_name(0);
368 2400 : let header = new_tar_header(&file_name, 0)?;
369 2400 : self.ar.append(&header, &mut io::empty()).await?;
370 2400 : return Ok(());
371 11618 : }
372 11618 :
373 11618 : // Add a file for each chunk of blocks (aka segment)
374 11618 : let mut startblk = 0;
375 11618 : let mut seg = 0;
376 23236 : while startblk < nblocks {
377 11618 : let endblk = std::cmp::min(startblk + RELSEG_SIZE, nblocks);
378 11618 :
379 11618 : let mut segment_data: Vec<u8> = vec![];
380 46702 : for blknum in startblk..endblk {
381 46702 : let img = self
382 46702 : .timeline
383 46702 : .get_rel_page_at_lsn(src, blknum, Version::Lsn(self.lsn), false, self.ctx)
384 6461 : .await?;
385 46702 : segment_data.extend_from_slice(&img[..]);
386 : }
387 :
388 11618 : let file_name = dst.to_segfile_name(seg as u32);
389 11618 : let header = new_tar_header(&file_name, segment_data.len() as u64)?;
390 11618 : self.ar.append(&header, segment_data.as_slice()).await?;
391 :
392 11618 : seg += 1;
393 11618 : startblk = endblk;
394 : }
395 :
396 11618 : Ok(())
397 14018 : }
398 :
399 : //
400 : // Include database/tablespace directories.
401 : //
402 : // Each directory contains a PG_VERSION file, and the default database
403 : // directories also contain pg_filenode.map files.
404 : //
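// For a regular (non-global) database this emits, e.g. (dbnode illustrative):
//
//   base/16390/                 -- directory entry
//   base/16390/PG_VERSION
//   base/16390/pg_filenode.map
//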
405 2416 : async fn add_dbdir(
406 2416 : &mut self,
407 2416 : spcnode: u32,
408 2416 : dbnode: u32,
409 2416 : has_relmap_file: bool,
410 2416 : ) -> anyhow::Result<()> {
411 2416 : let relmap_img = if has_relmap_file {
412 2414 : let img = self
413 2414 : .timeline
414 2414 : .get_relmap_file(spcnode, dbnode, Version::Lsn(self.lsn), self.ctx)
415 296 : .await?;
416 :
417 2414 : ensure!(
418 2414 : img.len()
419 2414 : == dispatch_pgversion!(
420 2414 : self.timeline.pg_version,
421 2414 : pgv::bindings::SIZEOF_RELMAPFILE
422 : )
423 : );
424 :
425 2414 : Some(img)
426 : } else {
427 2 : None
428 : };
429 :
430 2416 : if spcnode == GLOBALTABLESPACE_OID {
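// NB: for v14/v15 the PG_VERSION content is written without a trailing
// newline; for later versions a trailing newline (\x0A) is appended.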
431 601 : let pg_version_str = match self.timeline.pg_version {
432 601 : 14 | 15 => self.timeline.pg_version.to_string(),
433 0 : ver => format!("{ver}\x0A"),
434 : };
435 601 : let header = new_tar_header("PG_VERSION", pg_version_str.len() as u64)?;
436 601 : self.ar.append(&header, pg_version_str.as_bytes()).await?;
437 :
438 601 : info!("timeline.pg_version {}", self.timeline.pg_version);
439 :
440 601 : if let Some(img) = relmap_img {
441 : // filenode map for global tablespace
442 601 : let header = new_tar_header("global/pg_filenode.map", img.len() as u64)?;
443 601 : self.ar.append(&header, &img[..]).await?;
444 : } else {
445 0 : warn!("global/pg_filenode.map is missing");
446 : }
447 : } else {
448 : // User-defined tablespaces are not supported. However, as
449 : // a special case, if a tablespace/db directory is
450 : // completely empty, we can leave it out altogether. This
451 : // makes a base backup taken after the 'tablespace'
452 : // regression test pass, because the test drops the
453 : // tablespaces it created.
454 : //
455 : // FIXME: this wouldn't be necessary if we handled
456 : // XLOG_TBLSPC_DROP records. But we probably should just
457 : // throw an error on CREATE TABLESPACE in the first place.
458 1815 : if !has_relmap_file
459 2 : && self
460 2 : .timeline
461 2 : .list_rels(spcnode, dbnode, Version::Lsn(self.lsn), self.ctx)
462 0 : .await?
463 2 : .is_empty()
464 : {
465 2 : return Ok(());
466 1813 : }
467 1813 : // User-defined tablespaces are not supported
468 1813 : ensure!(spcnode == DEFAULTTABLESPACE_OID);
469 :
470 : // Append dir path for each database
471 1813 : let path = format!("base/{}", dbnode);
472 1813 : let header = new_tar_header_dir(&path)?;
473 1813 : self.ar.append(&header, &mut io::empty()).await?;
474 :
475 1813 : if let Some(img) = relmap_img {
476 1813 : let dst_path = format!("base/{}/PG_VERSION", dbnode);
477 :
478 1813 : let pg_version_str = match self.timeline.pg_version {
479 1813 : 14 | 15 => self.timeline.pg_version.to_string(),
480 0 : ver => format!("{ver}\x0A"),
481 : };
482 1813 : let header = new_tar_header(&dst_path, pg_version_str.len() as u64)?;
483 1813 : self.ar.append(&header, pg_version_str.as_bytes()).await?;
484 :
485 1813 : let relmap_path = format!("base/{}/pg_filenode.map", dbnode);
486 1813 : let header = new_tar_header(&relmap_path, img.len() as u64)?;
487 1813 : self.ar.append(&header, &img[..]).await?;
488 0 : }
489 : };
490 2414 : Ok(())
491 2416 : }
492 :
493 : //
494 : // Extract twophase state files
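// (on disk, a twophase state file is the state body followed by its
// CRC-32C checksum, which is what add_twophase_file reconstructs)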
495 : //
496 2 : async fn add_twophase_file(&mut self, xid: TransactionId) -> anyhow::Result<()> {
497 2 : let img = self
498 2 : .timeline
499 2 : .get_twophase_file(xid, self.lsn, self.ctx)
500 0 : .await?;
501 :
502 2 : let mut buf = BytesMut::new();
503 2 : buf.extend_from_slice(&img[..]);
504 2 : let crc = crc32c::crc32c(&img[..]);
505 2 : buf.put_u32_le(crc);
506 2 : let path = format!("pg_twophase/{:>08X}", xid);
507 2 : let header = new_tar_header(&path, buf.len() as u64)?;
508 2 : self.ar.append(&header, &buf[..]).await?;
509 :
510 2 : Ok(())
511 2 : }
512 :
513 : //
514 : // Add generated pg_control file and bootstrap WAL segment.
515 : // Also send zenith.signal file with extra bootstrap data.
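//
// zenith.signal holds a single "PREV LSN" line, one of (the concrete
// LSN shown is illustrative):
//   PREV LSN: none       -- basebackup taken at the branch point
//   PREV LSN: invalid    -- previous record LSN is not known
//   PREV LSN: 0/169AD58  -- the actual previous record's LSN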
516 : //
517 595 : async fn add_pgcontrol_file(&mut self) -> anyhow::Result<()> {
518 595 : // add zenith.signal file
519 595 : let mut zenith_signal = String::new();
520 595 : if self.prev_record_lsn == Lsn(0) {
521 47 : if self.lsn == self.timeline.get_ancestor_lsn() {
522 9 : write!(zenith_signal, "PREV LSN: none")?;
523 : } else {
524 38 : write!(zenith_signal, "PREV LSN: invalid")?;
525 : }
526 : } else {
527 548 : write!(zenith_signal, "PREV LSN: {}", self.prev_record_lsn)?;
528 : }
529 595 : self.ar
530 595 : .append(
531 595 : &new_tar_header("zenith.signal", zenith_signal.len() as u64)?,
532 595 : zenith_signal.as_bytes(),
533 : )
534 2 : .await?;
535 :
536 595 : let checkpoint_bytes = self
537 595 : .timeline
538 595 : .get_checkpoint(self.lsn, self.ctx)
539 32 : .await
540 595 : .context("failed to get checkpoint bytes")?;
541 595 : let pg_control_bytes = self
542 595 : .timeline
543 595 : .get_control_file(self.lsn, self.ctx)
544 28 : .await
545 595 : .context("failed get control bytes")?;
546 :
547 595 : let (pg_control_bytes, system_identifier) = postgres_ffi::generate_pg_control(
548 595 : &pg_control_bytes,
549 595 : &checkpoint_bytes,
550 595 : self.lsn,
551 595 : self.timeline.pg_version,
552 595 : )?;
553 :
554 : // Send pg_control.
555 595 : let header = new_tar_header("global/pg_control", pg_control_bytes.len() as u64)?;
556 595 : self.ar.append(&header, &pg_control_bytes[..]).await?;
557 :
558 : // Send the bootstrap WAL segment.
559 595 : let segno = self.lsn.segment_number(WAL_SEGMENT_SIZE);
560 595 : let wal_file_name = XLogFileName(PG_TLI, segno, WAL_SEGMENT_SIZE);
561 595 : let wal_file_path = format!("pg_wal/{}", wal_file_name);
562 595 : let header = new_tar_header(&wal_file_path, WAL_SEGMENT_SIZE as u64)?;
563 :
564 595 : let wal_seg = postgres_ffi::generate_wal_segment(
565 595 : segno,
566 595 : system_identifier,
567 595 : self.timeline.pg_version,
568 595 : self.lsn,
569 595 : )
570 595 : .map_err(|e| anyhow!(e).context("Failed generating wal segment"))?;
571 595 : ensure!(wal_seg.len() == WAL_SEGMENT_SIZE);
572 7702 : self.ar.append(&header, &wal_seg[..]).await?;
573 595 : Ok(())
574 595 : }
575 : }
576 :
577 : //
578 : // Create new tarball entry header
579 : //
580 24985 : fn new_tar_header(path: &str, size: u64) -> anyhow::Result<Header> {
581 24985 : let mut header = Header::new_gnu();
582 24985 : header.set_size(size);
583 24985 : header.set_path(path)?;
584 24985 : header.set_mode(0o600); // -rw-------
585 24985 : header.set_mtime(
586 24985 : // use current time as last modified time
587 24985 : SystemTime::now()
588 24985 : .duration_since(SystemTime::UNIX_EPOCH)
589 24985 : .unwrap()
590 24985 : .as_secs(),
591 24985 : );
592 24985 : header.set_cksum();
593 24985 : Ok(header)
594 24985 : }
595 :
596 15167 : fn new_tar_header_dir(path: &str) -> anyhow::Result<Header> {
597 15167 : let mut header = Header::new_gnu();
598 15167 : header.set_size(0);
599 15167 : header.set_path(path)?;
600 15167 : header.set_mode(0o755); // drwxr-xr-x
601 15167 : header.set_entry_type(EntryType::dir());
602 15167 : header.set_mtime(
603 15167 : // use current time as last modified time
604 15167 : SystemTime::now()
605 15167 : .duration_since(SystemTime::UNIX_EPOCH)
606 15167 : .unwrap()
607 15167 : .as_secs(),
608 15167 : );
609 15167 : header.set_cksum();
610 15167 : Ok(header)
611 15167 : }