LCOV - code coverage report
Current view: top level - safekeeper/src - pull_timeline.rs (source / functions)
Test: 6df3fc19ec669bcfbbf9aba41d1338898d24eaa0.info
Test Date: 2025-03-12 18:28:53
Coverage: Lines: 0.0 % (0 of 298 hit)   Functions: 0.0 % (0 of 32 hit)

            Line data    Source code
       1              : use std::cmp::min;
       2              : use std::io::{self, ErrorKind};
       3              : use std::sync::Arc;
       4              : 
       5              : use anyhow::{Context, Result, anyhow, bail};
       6              : use bytes::Bytes;
       7              : use camino::Utf8PathBuf;
       8              : use chrono::{DateTime, Utc};
       9              : use futures::{SinkExt, StreamExt, TryStreamExt};
      10              : use postgres_ffi::{PG_TLI, XLogFileName, XLogSegNo};
      11              : use safekeeper_api::Term;
      12              : use safekeeper_api::models::{PullTimelineRequest, PullTimelineResponse, TimelineStatus};
      13              : use safekeeper_client::mgmt_api;
      14              : use safekeeper_client::mgmt_api::Client;
      15              : use serde::Deserialize;
      16              : use tokio::fs::OpenOptions;
      17              : use tokio::io::AsyncWrite;
      18              : use tokio::sync::mpsc;
      19              : use tokio::task;
      20              : use tokio_tar::{Archive, Builder, Header};
      21              : use tokio_util::io::{CopyToBytes, SinkWriter};
      22              : use tokio_util::sync::PollSender;
      23              : use tracing::{error, info, instrument};
      24              : use utils::crashsafe::fsync_async_opt;
      25              : use utils::id::{NodeId, TenantTimelineId};
      26              : use utils::logging::SecretString;
      27              : use utils::lsn::Lsn;
      28              : use utils::pausable_failpoint;
      29              : 
      30              : use crate::control_file::CONTROL_FILE_NAME;
      31              : use crate::state::{EvictionState, TimelinePersistentState};
      32              : use crate::timeline::{Timeline, WalResidentTimeline};
      33              : use crate::timelines_global_map::{create_temp_timeline_dir, validate_temp_timeline};
      34              : use crate::wal_storage::open_wal_file;
      35              : use crate::{GlobalTimelines, debug_dump, wal_backup};
      36              : 
      37              : /// Stream tar archive of timeline to tx.
      38              : #[instrument(name = "snapshot", skip_all, fields(ttid = %tli.ttid))]
      39              : pub async fn stream_snapshot(
      40              :     tli: Arc<Timeline>,
      41              :     source: NodeId,
      42              :     destination: NodeId,
      43              :     tx: mpsc::Sender<Result<Bytes>>,
      44              : ) {
      45              :     match tli.try_wal_residence_guard().await {
      46              :         Err(e) => {
      47              :             tx.send(Err(anyhow!("Error checking residence: {:#}", e)))
      48              :                 .await
      49              :                 .ok();
      50              :         }
      51              :         Ok(maybe_resident_tli) => {
      52              :             if let Err(e) = match maybe_resident_tli {
      53              :                 Some(resident_tli) => {
      54              :                     stream_snapshot_resident_guts(resident_tli, source, destination, tx.clone())
      55              :                         .await
      56              :                 }
      57              :                 None => stream_snapshot_offloaded_guts(tli, source, destination, tx.clone()).await,
      58              :             } {
       59              :                 // Error type/contents don't matter as they can't reach the client
      60              :                 // (hyper likely doesn't do anything with it), but http stream will be
      61              :                 // prematurely terminated. It would be nice to try to send the error in
      62              :                 // trailers though.
      63              :                 tx.send(Err(anyhow!("snapshot failed"))).await.ok();
      64              :                 error!("snapshot failed: {:#}", e);
      65              :             }
      66              :         }
      67              :     }
      68              : }
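// Illustrative sketch (not part of pull_timeline.rs): how a caller might drive
// stream_snapshot and drain the resulting byte stream, e.g. to feed an HTTP
// response body. The channel capacity and the function name are hypothetical;
// the real wiring lives in the safekeeper HTTP handler.
async fn drive_snapshot_example(tli: Arc<Timeline>, source: NodeId, destination: NodeId) {
    let (tx, mut rx) = mpsc::channel::<Result<Bytes>>(32);
    // Produce the tar archive on a separate task so we can consume concurrently.
    let producer = task::spawn(stream_snapshot(tli, source, destination, tx));
    while let Some(chunk) = rx.recv().await {
        match chunk {
            Ok(bytes) => info!("forwarding {} bytes of snapshot", bytes.len()),
            Err(e) => {
                error!("snapshot stream failed: {:#}", e);
                break;
            }
        }
    }
    producer.await.ok();
}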
      69              : 
      70              : /// State needed while streaming the snapshot.
      71              : pub struct SnapshotContext {
      72              :     pub from_segno: XLogSegNo, // including
      73              :     pub upto_segno: XLogSegNo, // including
      74              :     pub term: Term,
      75              :     pub last_log_term: Term,
      76              :     pub flush_lsn: Lsn,
      77              :     pub wal_seg_size: usize,
      78              :     // used to remove WAL hold off in Drop.
      79              :     pub tli: WalResidentTimeline,
      80              : }
      81              : 
      82              : impl Drop for SnapshotContext {
      83            0 :     fn drop(&mut self) {
      84            0 :         let tli = self.tli.clone();
      85            0 :         task::spawn(async move {
      86            0 :             let mut shared_state = tli.write_shared_state().await;
      87            0 :             shared_state.wal_removal_on_hold = false;
      88            0 :         });
      89            0 :     }
      90              : }
      91              : 
      92              : /// Build a tokio_tar stream that sends encoded bytes into a Bytes channel.
      93            0 : fn prepare_tar_stream(
      94            0 :     tx: mpsc::Sender<Result<Bytes>>,
      95            0 : ) -> tokio_tar::Builder<impl AsyncWrite + Unpin + Send> {
      96            0 :     // tokio-tar wants Write implementor, but we have mpsc tx <Result<Bytes>>;
      97            0 :     // use SinkWriter as a Write impl. That is,
      98            0 :     // - create Sink from the tx. It returns PollSendError if chan is closed.
      99            0 :     let sink = PollSender::new(tx);
     100            0 :     // - SinkWriter needs sink error to be io one, map it.
     101            0 :     let sink_io_err = sink.sink_map_err(|_| io::Error::from(ErrorKind::BrokenPipe));
      102            0 :     // - SinkWriter wants the sink type to be just Bytes, not Result<Bytes>, so map
      103            0 :     //   it with with(). Note that with() accepts an async function (which we don't
      104            0 :     //   need) and allows the map to fail (which we don't need either), hence the
      105            0 :     //   two Oks.
     106            0 :     let oksink = sink_io_err.with(|b: Bytes| async { io::Result::Ok(Result::Ok(b)) });
     107            0 :     // - SinkWriter (not surprisingly) wants sink of &[u8], not bytes, so wrap
     108            0 :     // into CopyToBytes. This is a data copy.
     109            0 :     let copy_to_bytes = CopyToBytes::new(oksink);
     110            0 :     let writer = SinkWriter::new(copy_to_bytes);
     111            0 :     let pinned_writer = Box::pin(writer);
     112            0 : 
     113            0 :     // Note that tokio_tar append_* funcs use tokio::io::copy with 8KB buffer
     114            0 :     // which is also likely suboptimal.
     115            0 :     Builder::new_non_terminated(pinned_writer)
     116            0 : }
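// Illustrative sketch (not part of pull_timeline.rs): exercising the adapter chain
// built by prepare_tar_stream. Writes into the returned Builder show up as Bytes
// chunks on the receiver side. The file name, payload and channel capacity are
// arbitrary values chosen for the example.
async fn tar_stream_roundtrip_example() -> Result<()> {
    let (tx, mut rx) = mpsc::channel::<Result<Bytes>>(16);
    let producer = task::spawn(async move {
        let mut ar = prepare_tar_stream(tx);
        // Append a small in-memory "file"; tokio_tar needs a header with the size set.
        let payload = b"hello";
        let mut header = Header::new_gnu();
        header.set_size(payload.len() as u64);
        ar.append_data(&mut header, "example.txt", &payload[..]).await?;
        // finish() completes the archive; dropping the Builder drops the sender and
        // ends the stream on the receiver side.
        ar.finish().await
    });
    let mut total = 0usize;
    while let Some(chunk) = rx.recv().await {
        total += chunk?.len();
    }
    producer.await??;
    info!("received {} bytes of tar output", total);
    Ok(())
}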
     117              : 
     118              : /// Implementation of snapshot for an offloaded timeline, only reads control file
     119            0 : pub(crate) async fn stream_snapshot_offloaded_guts(
     120            0 :     tli: Arc<Timeline>,
     121            0 :     source: NodeId,
     122            0 :     destination: NodeId,
     123            0 :     tx: mpsc::Sender<Result<Bytes>>,
     124            0 : ) -> Result<()> {
     125            0 :     let mut ar = prepare_tar_stream(tx);
     126            0 : 
     127            0 :     tli.snapshot_offloaded(&mut ar, source, destination).await?;
     128              : 
     129            0 :     ar.finish().await?;
     130              : 
     131            0 :     Ok(())
     132            0 : }
     133              : 
     134              : /// Implementation of snapshot for a timeline which is resident (includes some segment data)
     135            0 : pub async fn stream_snapshot_resident_guts(
     136            0 :     tli: WalResidentTimeline,
     137            0 :     source: NodeId,
     138            0 :     destination: NodeId,
     139            0 :     tx: mpsc::Sender<Result<Bytes>>,
     140            0 : ) -> Result<()> {
     141            0 :     let mut ar = prepare_tar_stream(tx);
     142              : 
     143            0 :     let bctx = tli.start_snapshot(&mut ar, source, destination).await?;
     144            0 :     pausable_failpoint!("sk-snapshot-after-list-pausable");
     145              : 
     146            0 :     let tli_dir = tli.get_timeline_dir();
     147            0 :     info!(
     148            0 :         "sending {} segments [{:#X}-{:#X}], term={}, last_log_term={}, flush_lsn={}",
     149            0 :         bctx.upto_segno - bctx.from_segno + 1,
     150              :         bctx.from_segno,
     151              :         bctx.upto_segno,
     152              :         bctx.term,
     153              :         bctx.last_log_term,
     154              :         bctx.flush_lsn,
     155              :     );
     156            0 :     for segno in bctx.from_segno..=bctx.upto_segno {
     157            0 :         let (mut sf, is_partial) = open_wal_file(&tli_dir, segno, bctx.wal_seg_size).await?;
     158            0 :         let mut wal_file_name = XLogFileName(PG_TLI, segno, bctx.wal_seg_size);
     159            0 :         if is_partial {
     160            0 :             wal_file_name.push_str(".partial");
     161            0 :         }
     162            0 :         ar.append_file(&wal_file_name, &mut sf).await?;
     163              :     }
     164              : 
      165              :     // Do the term check before ar.finish so the archive is left corrupted in case of a
      166              :     // term change. The client shouldn't ignore an abrupt stream end, but just to be sure.
     167            0 :     tli.finish_snapshot(&bctx).await?;
     168              : 
     169            0 :     ar.finish().await?;
     170              : 
     171            0 :     Ok(())
     172            0 : }
     173              : 
     174              : impl Timeline {
      175              :     /// Simple snapshot for an offloaded timeline: we only upload a renamed partial segment and
      176              :     /// pass a modified control file into the provided tar stream (no data segments from disk, since
      177              :     /// we are offloaded and there aren't any).
     178            0 :     async fn snapshot_offloaded<W: AsyncWrite + Unpin + Send>(
     179            0 :         self: &Arc<Timeline>,
     180            0 :         ar: &mut tokio_tar::Builder<W>,
     181            0 :         source: NodeId,
     182            0 :         destination: NodeId,
     183            0 :     ) -> Result<()> {
     184              :         // Take initial copy of control file, then release state lock
     185            0 :         let mut control_file = {
     186            0 :             let shared_state = self.write_shared_state().await;
     187              : 
     188            0 :             let control_file = TimelinePersistentState::clone(shared_state.sk.state());
     189              : 
      190              :             // Rare race: we got unevicted between entering the function and reading the control file.
      191              :             // We error out and let the API caller retry.
     192            0 :             if !matches!(control_file.eviction_state, EvictionState::Offloaded(_)) {
     193            0 :                 bail!("Timeline was un-evicted during snapshot, please retry");
     194            0 :             }
     195            0 : 
     196            0 :             control_file
     197              :         };
     198              : 
     199              :         // Modify the partial segment of the in-memory copy for the control file to
     200              :         // point to the destination safekeeper.
     201            0 :         let replace = control_file
     202            0 :             .partial_backup
     203            0 :             .replace_uploaded_segment(source, destination)?;
     204              : 
     205            0 :         let Some(replace) = replace else {
     206              :             // In Manager:: ready_for_eviction, we do not permit eviction unless the timeline
     207              :             // has a partial segment.  It is unexpected that
     208            0 :             anyhow::bail!("Timeline has no partial segment, cannot generate snapshot");
     209              :         };
     210              : 
     211            0 :         tracing::info!("Replacing uploaded partial segment in in-mem control file: {replace:?}");
     212              : 
     213              :         // Optimistically try to copy the partial segment to the destination's path: this
     214              :         // can fail if the timeline was un-evicted and modified in the background.
     215            0 :         let remote_timeline_path = &self.remote_path;
     216            0 :         wal_backup::copy_partial_segment(
     217            0 :             &replace.previous.remote_path(remote_timeline_path),
     218            0 :             &replace.current.remote_path(remote_timeline_path),
     219            0 :         )
     220            0 :         .await?;
     221              : 
     222              :         // Since the S3 copy succeeded with the path given in our control file snapshot, and
     223              :         // we are sending that snapshot in our response, we are giving the caller a consistent
     224              :         // snapshot even if our local Timeline was unevicted or otherwise modified in the meantime.
     225            0 :         let buf = control_file
     226            0 :             .write_to_buf()
     227            0 :             .with_context(|| "failed to serialize control store")?;
     228            0 :         let mut header = Header::new_gnu();
     229            0 :         header.set_size(buf.len().try_into().expect("never breaches u64"));
     230            0 :         ar.append_data(&mut header, CONTROL_FILE_NAME, buf.as_slice())
     231            0 :             .await
     232            0 :             .with_context(|| "failed to append to archive")?;
     233              : 
     234            0 :         Ok(())
     235            0 :     }
     236              : }
     237              : 
     238              : impl WalResidentTimeline {
     239              :     /// Start streaming tar archive with timeline:
     240              :     /// 1) stream control file under lock;
     241              :     /// 2) hold off WAL removal;
     242              :     /// 3) collect SnapshotContext to understand which WAL segments should be
     243              :     ///    streamed.
     244              :     ///
     245              :     /// Snapshot streams data up to flush_lsn. To make this safe, we must check
      246              :     /// that the term doesn't change during the procedure, or we risk sending a mix of
      247              :     /// WAL from different histories. The term is remembered in the SnapshotContext
      248              :     /// and checked in finish_snapshot. Note that in the last segment some WAL
      249              :     /// higher than the flush_lsn set here might be streamed; that's fine as long as
      250              :     /// the term doesn't change.
     251              :     ///
      252              :     /// Alternatively, we could send only up to commit_lsn to get some valid
      253              :     /// state which would later be recovered by compute; in that case the term check
      254              :     /// is not needed, but we likely don't want that, as there might be no
      255              :     /// compute which could perform the recovery.
     256              :     ///
     257              :     /// When returned SnapshotContext is dropped WAL hold is removed.
     258            0 :     async fn start_snapshot<W: AsyncWrite + Unpin + Send>(
     259            0 :         &self,
     260            0 :         ar: &mut tokio_tar::Builder<W>,
     261            0 :         source: NodeId,
     262            0 :         destination: NodeId,
     263            0 :     ) -> Result<SnapshotContext> {
     264            0 :         let mut shared_state = self.write_shared_state().await;
     265            0 :         let wal_seg_size = shared_state.get_wal_seg_size();
     266            0 : 
     267            0 :         let mut control_store = TimelinePersistentState::clone(shared_state.sk.state());
     268              :         // Modify the partial segment of the in-memory copy for the control file to
     269              :         // point to the destination safekeeper.
     270            0 :         let replace = control_store
     271            0 :             .partial_backup
     272            0 :             .replace_uploaded_segment(source, destination)?;
     273              : 
     274            0 :         if let Some(replace) = replace {
     275              :             // The deserialized control file has an uploaded partial. We upload a copy
     276              :             // of it to object storage for the destination safekeeper and send an updated
     277              :             // control file in the snapshot.
     278            0 :             tracing::info!(
     279            0 :                 "Replacing uploaded partial segment in in-mem control file: {replace:?}"
     280              :             );
     281              : 
     282            0 :             let remote_timeline_path = &self.tli.remote_path;
     283            0 :             wal_backup::copy_partial_segment(
     284            0 :                 &replace.previous.remote_path(remote_timeline_path),
     285            0 :                 &replace.current.remote_path(remote_timeline_path),
     286            0 :             )
     287            0 :             .await?;
     288            0 :         }
     289              : 
     290            0 :         let buf = control_store
     291            0 :             .write_to_buf()
     292            0 :             .with_context(|| "failed to serialize control store")?;
     293            0 :         let mut header = Header::new_gnu();
     294            0 :         header.set_size(buf.len().try_into().expect("never breaches u64"));
     295            0 :         ar.append_data(&mut header, CONTROL_FILE_NAME, buf.as_slice())
     296            0 :             .await
     297            0 :             .with_context(|| "failed to append to archive")?;
     298              : 
      299              :         // We need to stream starting from the oldest segment that someone (s3 or
      300              :         // pageserver) still needs. This duplicates calc_horizon_lsn logic.
     301              :         //
     302              :         // We know that WAL wasn't removed up to this point because it cannot be
     303              :         // removed further than `backup_lsn`. Since we're holding shared_state
     304              :         // lock and setting `wal_removal_on_hold` later, it guarantees that WAL
     305              :         // won't be removed until we're done.
     306            0 :         let from_lsn = min(
     307            0 :             shared_state.sk.state().remote_consistent_lsn,
     308            0 :             shared_state.sk.state().backup_lsn,
     309            0 :         );
     310            0 :         if from_lsn == Lsn::INVALID {
     311              :             // this is possible if snapshot is called before handling first
     312              :             // elected message
     313            0 :             bail!("snapshot is called on uninitialized timeline");
     314            0 :         }
     315            0 :         let from_segno = from_lsn.segment_number(wal_seg_size);
     316            0 :         let term = shared_state.sk.state().acceptor_state.term;
     317            0 :         let last_log_term = shared_state.sk.last_log_term();
     318            0 :         let flush_lsn = shared_state.sk.flush_lsn();
     319            0 :         let upto_segno = flush_lsn.segment_number(wal_seg_size);
     320              :         // have some limit on max number of segments as a sanity check
     321              :         const MAX_ALLOWED_SEGS: u64 = 1000;
     322            0 :         let num_segs = upto_segno - from_segno + 1;
     323            0 :         if num_segs > MAX_ALLOWED_SEGS {
     324            0 :             bail!(
     325            0 :                 "snapshot is called on timeline with {} segments, but the limit is {}",
     326            0 :                 num_segs,
     327            0 :                 MAX_ALLOWED_SEGS
     328            0 :             );
     329            0 :         }
     330            0 : 
     331            0 :         // Prevent WAL removal while we're streaming data.
     332            0 :         //
      333            0 :         // Since this is a flag, not a counter, just bail out if already set; we
     334            0 :         // shouldn't need concurrent snapshotting.
     335            0 :         if shared_state.wal_removal_on_hold {
     336            0 :             bail!("wal_removal_on_hold is already true");
     337            0 :         }
     338            0 :         shared_state.wal_removal_on_hold = true;
     339            0 : 
     340            0 :         // Drop shared_state to release the lock, before calling wal_residence_guard().
     341            0 :         drop(shared_state);
     342              : 
     343            0 :         let tli_copy = self.wal_residence_guard().await?;
     344            0 :         let bctx = SnapshotContext {
     345            0 :             from_segno,
     346            0 :             upto_segno,
     347            0 :             term,
     348            0 :             last_log_term,
     349            0 :             flush_lsn,
     350            0 :             wal_seg_size,
     351            0 :             tli: tli_copy,
     352            0 :         };
     353            0 : 
     354            0 :         Ok(bctx)
     355            0 :     }
     356              : 
      357              :     /// Finish snapshotting: check that the term(s) haven't changed.
     358              :     ///
      359              :     /// Note that the WAL gc hold-off is removed in Drop of SnapshotContext so that it
      360              :     /// isn't forgotten if snapshotting fails midway.
     361            0 :     pub async fn finish_snapshot(&self, bctx: &SnapshotContext) -> Result<()> {
     362            0 :         let shared_state = self.read_shared_state().await;
     363            0 :         let term = shared_state.sk.state().acceptor_state.term;
     364            0 :         let last_log_term = shared_state.sk.last_log_term();
      365            0 :         // There are some cases where this check could be relaxed (e.g. last_log_term might
      366            0 :         // change, but as long as the older history is strictly part of the new one that's
      367            0 :         // fine), but there is no need to do it.
     368            0 :         if bctx.term != term || bctx.last_log_term != last_log_term {
     369            0 :             bail!(
     370            0 :                 "term(s) changed during snapshot: were term={}, last_log_term={}, now term={}, last_log_term={}",
     371            0 :                 bctx.term,
     372            0 :                 bctx.last_log_term,
     373            0 :                 term,
     374            0 :                 last_log_term
     375            0 :             );
     376            0 :         }
     377            0 :         Ok(())
     378            0 :     }
     379              : }
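// Illustrative numeric sketch (not part of pull_timeline.rs) of the segment-range
// computation in start_snapshot: streaming starts at the oldest segment that s3 or
// the pageserver still needs and ends at the segment containing flush_lsn. The LSN
// values below are arbitrary and assume the default 16 MiB WAL segment size.
fn segment_range_example() {
    let wal_seg_size: usize = 16 * 1024 * 1024;
    let remote_consistent_lsn = Lsn(0x0200_0000); // pageserver has consumed up to 32 MiB
    let backup_lsn = Lsn(0x0180_0000); // s3 backup has reached 24 MiB
    let flush_lsn = Lsn(0x0430_0000); // locally flushed a bit past 64 MiB
    let from_lsn = min(remote_consistent_lsn, backup_lsn);
    let from_segno = from_lsn.segment_number(wal_seg_size);
    let upto_segno = flush_lsn.segment_number(wal_seg_size);
    // Segments 1 through 4 inclusive would be streamed: 4 segments in total.
    assert_eq!((from_segno, upto_segno), (1, 4));
    assert_eq!(upto_segno - from_segno + 1, 4);
}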
     380              : 
     381              : /// Response for debug dump request.
     382            0 : #[derive(Debug, Deserialize)]
     383              : pub struct DebugDumpResponse {
     384              :     pub start_time: DateTime<Utc>,
     385              :     pub finish_time: DateTime<Utc>,
     386              :     pub timelines: Vec<debug_dump::Timeline>,
     387              :     pub timelines_count: usize,
     388              :     pub config: debug_dump::Config,
     389              : }
     390              : 
     391              : /// Find the most advanced safekeeper and pull timeline from it.
     392            0 : pub async fn handle_request(
     393            0 :     request: PullTimelineRequest,
     394            0 :     sk_auth_token: Option<SecretString>,
     395            0 :     global_timelines: Arc<GlobalTimelines>,
     396            0 : ) -> Result<PullTimelineResponse> {
     397            0 :     let existing_tli = global_timelines.get(TenantTimelineId::new(
     398            0 :         request.tenant_id,
     399            0 :         request.timeline_id,
     400            0 :     ));
     401            0 :     if existing_tli.is_ok() {
     402            0 :         bail!("Timeline {} already exists", request.timeline_id);
     403            0 :     }
     404            0 : 
     405            0 :     // TODO(DimasKovas): add ssl root CA certificate when implementing safekeeper's
     406            0 :     // part of https support (#24836).
     407            0 :     let http_client = reqwest::Client::new();
     408            0 : 
     409            0 :     let http_hosts = request.http_hosts.clone();
     410              : 
     411              :     // Figure out statuses of potential donors.
     412            0 :     let responses: Vec<Result<TimelineStatus, mgmt_api::Error>> =
     413            0 :         futures::future::join_all(http_hosts.iter().map(|url| async {
     414            0 :             let cclient = Client::new(http_client.clone(), url.clone(), sk_auth_token.clone());
     415            0 :             let info = cclient
     416            0 :                 .timeline_status(request.tenant_id, request.timeline_id)
     417            0 :                 .await?;
     418            0 :             Ok(info)
     419            0 :         }))
     420            0 :         .await;
     421              : 
     422            0 :     let mut statuses = Vec::new();
     423            0 :     for (i, response) in responses.into_iter().enumerate() {
     424            0 :         let status = response.context(format!("fetching status from {}", http_hosts[i]))?;
     425            0 :         statuses.push((status, i));
     426              :     }
     427              : 
     428              :     // Find the most advanced safekeeper
     429            0 :     let (status, i) = statuses
     430            0 :         .into_iter()
     431            0 :         .max_by_key(|(status, _)| {
     432            0 :             (
     433            0 :                 status.acceptor_state.epoch,
     434            0 :                 status.flush_lsn,
     435            0 :                 status.commit_lsn,
     436            0 :             )
     437            0 :         })
     438            0 :         .unwrap();
     439            0 :     let safekeeper_host = http_hosts[i].clone();
     440            0 : 
     441            0 :     assert!(status.tenant_id == request.tenant_id);
     442            0 :     assert!(status.timeline_id == request.timeline_id);
     443              : 
     444            0 :     pull_timeline(status, safekeeper_host, sk_auth_token, global_timelines).await
     445            0 : }
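// Illustrative sketch (not part of pull_timeline.rs) of the donor-selection order
// used above: candidates are compared lexicographically by
// (acceptor_state.epoch, flush_lsn, commit_lsn), so a donor with a newer accepted
// history wins even if another donor has a larger flush_lsn. The struct below is a
// simplified stand-in for TimelineStatus with made-up values.
fn pick_donor_example() {
    struct Candidate {
        host: &'static str,
        epoch: Term,
        flush_lsn: Lsn,
        commit_lsn: Lsn,
    }
    let candidates = vec![
        Candidate { host: "sk-1", epoch: 3, flush_lsn: Lsn(0x40), commit_lsn: Lsn(0x38) },
        Candidate { host: "sk-2", epoch: 4, flush_lsn: Lsn(0x30), commit_lsn: Lsn(0x30) },
    ];
    let best = candidates
        .into_iter()
        .max_by_key(|c| (c.epoch, c.flush_lsn, c.commit_lsn))
        .unwrap();
    // sk-2 wins: its higher epoch dominates sk-1's larger flush_lsn.
    assert_eq!(best.host, "sk-2");
}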
     446              : 
     447            0 : async fn pull_timeline(
     448            0 :     status: TimelineStatus,
     449            0 :     host: String,
     450            0 :     sk_auth_token: Option<SecretString>,
     451            0 :     global_timelines: Arc<GlobalTimelines>,
     452            0 : ) -> Result<PullTimelineResponse> {
     453            0 :     let ttid = TenantTimelineId::new(status.tenant_id, status.timeline_id);
     454            0 :     info!(
     455            0 :         "pulling timeline {} from safekeeper {}, commit_lsn={}, flush_lsn={}, term={}, epoch={}",
     456              :         ttid,
     457              :         host,
     458              :         status.commit_lsn,
     459              :         status.flush_lsn,
     460              :         status.acceptor_state.term,
     461              :         status.acceptor_state.epoch
     462              :     );
     463              : 
     464            0 :     let conf = &global_timelines.get_global_config();
     465              : 
     466            0 :     let (_tmp_dir, tli_dir_path) = create_temp_timeline_dir(conf, ttid).await?;
     467              :     // TODO(DimasKovas): add ssl root CA certificate when implementing safekeeper's
     468              :     // part of https support (#24836).
     469            0 :     let http_client = reqwest::Client::new();
     470            0 :     let client = Client::new(http_client, host.clone(), sk_auth_token.clone());
     471              :     // Request stream with basebackup archive.
     472            0 :     let bb_resp = client
     473            0 :         .snapshot(status.tenant_id, status.timeline_id, conf.my_id)
     474            0 :         .await?;
     475              : 
     476              :     // Make Stream of Bytes from it...
     477            0 :     let bb_stream = bb_resp.bytes_stream().map_err(std::io::Error::other);
     478            0 :     // and turn it into StreamReader implementing AsyncRead.
     479            0 :     let bb_reader = tokio_util::io::StreamReader::new(bb_stream);
     480              : 
      481              :     // Extract it to disk on the fly. We don't use the simple unpack() because we
      482              :     // want to fsync the files.
     483            0 :     let mut entries = Archive::new(bb_reader).entries()?;
     484            0 :     while let Some(base_tar_entry) = entries.next().await {
     485            0 :         let mut entry = base_tar_entry?;
     486            0 :         let header = entry.header();
     487            0 :         let file_path = header.path()?.into_owned();
     488            0 :         match header.entry_type() {
     489              :             tokio_tar::EntryType::Regular => {
     490            0 :                 let utf8_file_path =
     491            0 :                     Utf8PathBuf::from_path_buf(file_path).expect("non-Unicode path");
     492            0 :                 let dst_path = tli_dir_path.join(utf8_file_path);
     493            0 :                 let mut f = OpenOptions::new()
     494            0 :                     .create(true)
     495            0 :                     .truncate(true)
     496            0 :                     .write(true)
     497            0 :                     .open(&dst_path)
     498            0 :                     .await?;
     499            0 :                 tokio::io::copy(&mut entry, &mut f).await?;
     500              :                 // fsync the file
     501            0 :                 f.sync_all().await?;
     502              :             }
     503              :             _ => {
     504            0 :                 bail!(
     505            0 :                     "entry {} in backup tar archive is of unexpected type: {:?}",
     506            0 :                     file_path.display(),
     507            0 :                     header.entry_type()
     508            0 :                 );
     509              :             }
     510              :         }
     511              :     }
     512              : 
     513              :     // fsync temp timeline directory to remember its contents.
     514            0 :     fsync_async_opt(&tli_dir_path, !conf.no_sync).await?;
     515              : 
     516              :     // Let's create timeline from temp directory and verify that it's correct
     517            0 :     let (commit_lsn, flush_lsn) = validate_temp_timeline(conf, ttid, &tli_dir_path).await?;
     518            0 :     info!(
     519            0 :         "finished downloading timeline {}, commit_lsn={}, flush_lsn={}",
     520              :         ttid, commit_lsn, flush_lsn
     521              :     );
     522            0 :     assert!(status.commit_lsn <= status.flush_lsn);
     523              : 
     524              :     // Finally, load the timeline.
     525            0 :     let _tli = global_timelines
     526            0 :         .load_temp_timeline(ttid, &tli_dir_path, false)
     527            0 :         .await?;
     528              : 
     529            0 :     Ok(PullTimelineResponse {
     530            0 :         safekeeper_host: host,
     531            0 :     })
     532            0 : }
        

Generated by: LCOV version 2.1-beta