LCOV - code coverage report
Current view: top level - safekeeper/src - pull_timeline.rs (source / functions)
Test:      15f04989d2faf4ce76cecb56042184aca56ebae6.info
Test Date: 2025-07-14 11:50:36

             Coverage    Total    Hit
Lines:          0.0 %      315      0
Functions:      0.0 %       26      0

            Line data    Source code
       1              : use std::cmp::min;
       2              : use std::io::{self, ErrorKind};
       3              : use std::ops::RangeInclusive;
       4              : use std::sync::Arc;
       5              : 
       6              : use anyhow::{Context, Result, anyhow, bail};
       7              : use bytes::Bytes;
       8              : use camino::Utf8PathBuf;
       9              : use chrono::{DateTime, Utc};
      10              : use futures::{SinkExt, StreamExt, TryStreamExt};
      11              : use http_utils::error::ApiError;
      12              : use postgres_ffi::{PG_TLI, XLogFileName, XLogSegNo};
      13              : use remote_storage::GenericRemoteStorage;
      14              : use reqwest::Certificate;
      15              : use safekeeper_api::Term;
      16              : use safekeeper_api::models::{PullTimelineRequest, PullTimelineResponse, TimelineStatus};
      17              : use safekeeper_client::mgmt_api;
      18              : use safekeeper_client::mgmt_api::Client;
      19              : use serde::Deserialize;
      20              : use tokio::fs::OpenOptions;
      21              : use tokio::io::AsyncWrite;
      22              : use tokio::sync::mpsc;
      23              : use tokio::task;
      24              : use tokio_tar::{Archive, Builder, Header};
      25              : use tokio_util::io::{CopyToBytes, SinkWriter};
      26              : use tokio_util::sync::PollSender;
      27              : use tracing::{error, info, instrument};
      28              : use utils::crashsafe::fsync_async_opt;
      29              : use utils::id::{NodeId, TenantTimelineId};
      30              : use utils::logging::SecretString;
      31              : use utils::lsn::Lsn;
      32              : use utils::pausable_failpoint;
      33              : 
      34              : use crate::control_file::CONTROL_FILE_NAME;
      35              : use crate::state::{EvictionState, TimelinePersistentState};
      36              : use crate::timeline::{Timeline, TimelineError, WalResidentTimeline};
      37              : use crate::timelines_global_map::{create_temp_timeline_dir, validate_temp_timeline};
      38              : use crate::wal_storage::{open_wal_file, wal_file_paths};
      39              : use crate::{GlobalTimelines, debug_dump, wal_backup};
      40              : 
      41              : /// Stream tar archive of timeline to tx.
      42              : #[instrument(name = "snapshot", skip_all, fields(ttid = %tli.ttid))]
      43              : pub async fn stream_snapshot(
      44              :     tli: Arc<Timeline>,
      45              :     source: NodeId,
      46              :     destination: NodeId,
      47              :     tx: mpsc::Sender<Result<Bytes>>,
      48              :     storage: Option<Arc<GenericRemoteStorage>>,
      49              : ) {
      50              :     match tli.try_wal_residence_guard().await {
      51              :         Err(e) => {
      52              :             tx.send(Err(anyhow!("Error checking residence: {:#}", e)))
      53              :                 .await
      54              :                 .ok();
      55              :         }
      56              :         Ok(maybe_resident_tli) => {
      57              :             if let Err(e) = match maybe_resident_tli {
      58              :                 Some(resident_tli) => {
      59              :                     stream_snapshot_resident_guts(
      60              :                         resident_tli,
      61              :                         source,
      62              :                         destination,
      63              :                         tx.clone(),
      64              :                         storage,
      65              :                     )
      66              :                     .await
      67              :                 }
      68              :                 None => {
      69              :                     if let Some(storage) = storage {
      70              :                         stream_snapshot_offloaded_guts(
      71              :                             tli,
      72              :                             source,
      73              :                             destination,
      74              :                             tx.clone(),
      75              :                             &storage,
      76              :                         )
      77              :                         .await
      78              :                     } else {
      79              :                         tx.send(Err(anyhow!("remote storage not configured")))
      80              :                             .await
      81              :                             .ok();
      82              :                         return;
      83              :                     }
      84              :                 }
      85              :             } {
       86              :                 // Error type/contents don't matter as they can't reach the client
       87              :                 // (hyper likely doesn't do anything with it), but the http stream will be
       88              :                 // prematurely terminated. It would be nice to try to send the error in
       89              :                 // trailers though.
       90              :                 tx.send(Err(anyhow!("snapshot failed"))).await.ok();
       91              :                 error!("snapshot failed: {:#}", e);
      92              :             }
      93              :         }
      94              :     }
      95              : }
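
// A minimal sketch (illustrative, not part of this module) of the consuming side of
// stream_snapshot: the tx passed in feeds an mpsc receiver whose chunks the HTTP layer
// forwards to the client. Here they are simply concatenated into a Vec, and the channel
// capacity is arbitrary.
async fn collect_snapshot(
    tli: Arc<Timeline>,
    source: NodeId,
    destination: NodeId,
) -> Result<Vec<u8>> {
    let (tx, mut rx) = mpsc::channel(16);
    // Run the producer concurrently; it reports failures through the channel.
    tokio::spawn(stream_snapshot(tli, source, destination, tx, None));

    let mut out = Vec::new();
    while let Some(chunk) = rx.recv().await {
        out.extend_from_slice(&chunk?);
    }
    Ok(out)
}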
      96              : 
      97              : /// State needed while streaming the snapshot.
      98              : pub struct SnapshotContext {
      99              :     /// The interval of segment numbers. If None, the timeline hasn't had writes yet, so only send the control file
     100              :     pub from_to_segno: Option<RangeInclusive<XLogSegNo>>,
     101              :     pub term: Term,
     102              :     pub last_log_term: Term,
     103              :     pub flush_lsn: Lsn,
     104              :     pub wal_seg_size: usize,
      105              :     // Used to release the WAL removal hold-off in Drop.
     106              :     pub tli: WalResidentTimeline,
     107              : }
     108              : 
     109              : impl Drop for SnapshotContext {
     110            0 :     fn drop(&mut self) {
     111            0 :         let tli = self.tli.clone();
     112            0 :         task::spawn(async move {
     113            0 :             let mut shared_state = tli.write_shared_state().await;
     114            0 :             shared_state.wal_removal_on_hold = false;
     115            0 :         });
     116            0 :     }
     117              : }
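
// A minimal sketch of the same pattern in isolation: Drop cannot await, so async cleanup
// is handed to a spawned task. `HoldGuard` and its field are illustrative, not part of
// this crate; like the Drop above, this requires a running Tokio runtime.
struct HoldGuard {
    flag: Arc<tokio::sync::Mutex<bool>>,
}

impl Drop for HoldGuard {
    fn drop(&mut self) {
        let flag = self.flag.clone();
        task::spawn(async move {
            // Clear the flag asynchronously, mirroring wal_removal_on_hold above.
            *flag.lock().await = false;
        });
    }
}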
     118              : 
     119              : /// Build a tokio_tar stream that sends encoded bytes into a Bytes channel.
     120            0 : fn prepare_tar_stream(
     121            0 :     tx: mpsc::Sender<Result<Bytes>>,
     122            0 : ) -> tokio_tar::Builder<impl AsyncWrite + Unpin + Send> {
      123              :     // tokio-tar wants a Write implementor, but we have an mpsc tx of Result<Bytes>;
     124              :     // use SinkWriter as a Write impl. That is,
     125              :     // - create Sink from the tx. It returns PollSendError if chan is closed.
     126            0 :     let sink = PollSender::new(tx);
     127              :     // - SinkWriter needs sink error to be io one, map it.
     128            0 :     let sink_io_err = sink.sink_map_err(|_| io::Error::from(ErrorKind::BrokenPipe));
      129              :     // - SinkWriter wants the sink type to be just Bytes, not Result<Bytes>, so map
      130              :     //   it with with(). Note that with() accepts an async function, which we don't
      131              :     //   need, and allows the map to fail, which we don't need either; hence the
      132              :     //   two Oks.
     133            0 :     let oksink = sink_io_err.with(|b: Bytes| async { io::Result::Ok(Result::Ok(b)) });
      134              :     // - SinkWriter (not surprisingly) wants a sink of &[u8], not Bytes, so wrap
      135              :     //   it into CopyToBytes. This is a data copy.
     136            0 :     let copy_to_bytes = CopyToBytes::new(oksink);
     137            0 :     let writer = SinkWriter::new(copy_to_bytes);
     138            0 :     let pinned_writer = Box::pin(writer);
     139              : 
     140              :     // Note that tokio_tar append_* funcs use tokio::io::copy with 8KB buffer
     141              :     // which is also likely suboptimal.
     142            0 :     Builder::new_non_terminated(pinned_writer)
     143            0 : }
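
// A minimal sketch of the adapter chain above used on its own: an
// mpsc::Sender<Result<Bytes>> becomes an AsyncWrite, so whatever is written shows up on
// the receiver as Bytes chunks. The capacity, payload and printout are illustrative.
async fn adapter_chain_demo() -> Result<()> {
    use tokio::io::AsyncWriteExt;

    let (tx, mut rx) = mpsc::channel::<Result<Bytes>>(4);

    // Same steps as prepare_tar_stream: PollSender -> io error mapping -> Ok-wrapping
    // -> &[u8]-to-Bytes copy.
    let sink = PollSender::new(tx)
        .sink_map_err(|_| io::Error::from(ErrorKind::BrokenPipe))
        .with(|b: Bytes| async { io::Result::Ok(Result::Ok(b)) });
    let mut writer = Box::pin(SinkWriter::new(CopyToBytes::new(sink)));

    writer.write_all(b"hello").await?;
    writer.flush().await?;
    drop(writer); // closing the writer closes the channel, ending the receive loop

    while let Some(chunk) = rx.recv().await {
        println!("received {} bytes", chunk?.len());
    }
    Ok(())
}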
     144              : 
      145              : /// Implementation of snapshot for an offloaded timeline; only reads the control file.
     146            0 : pub(crate) async fn stream_snapshot_offloaded_guts(
     147            0 :     tli: Arc<Timeline>,
     148            0 :     source: NodeId,
     149            0 :     destination: NodeId,
     150            0 :     tx: mpsc::Sender<Result<Bytes>>,
     151            0 :     storage: &GenericRemoteStorage,
     152            0 : ) -> Result<()> {
     153            0 :     let mut ar = prepare_tar_stream(tx);
     154              : 
     155            0 :     tli.snapshot_offloaded(&mut ar, source, destination, storage)
     156            0 :         .await?;
     157              : 
     158            0 :     ar.finish().await?;
     159              : 
     160            0 :     Ok(())
     161            0 : }
     162              : 
     163              : /// Implementation of snapshot for a timeline which is resident (includes some segment data)
     164            0 : pub async fn stream_snapshot_resident_guts(
     165            0 :     tli: WalResidentTimeline,
     166            0 :     source: NodeId,
     167            0 :     destination: NodeId,
     168            0 :     tx: mpsc::Sender<Result<Bytes>>,
     169            0 :     storage: Option<Arc<GenericRemoteStorage>>,
     170            0 : ) -> Result<()> {
     171            0 :     let mut ar = prepare_tar_stream(tx);
     172              : 
     173            0 :     let bctx = tli
     174            0 :         .start_snapshot(&mut ar, source, destination, storage)
     175            0 :         .await?;
     176            0 :     pausable_failpoint!("sk-snapshot-after-list-pausable");
     177              : 
     178            0 :     if let Some(from_to_segno) = &bctx.from_to_segno {
     179            0 :         let tli_dir = tli.get_timeline_dir();
     180            0 :         info!(
     181            0 :             "sending {} segments [{:#X}-{:#X}], term={}, last_log_term={}, flush_lsn={}",
     182            0 :             from_to_segno.end() - from_to_segno.start() + 1,
     183            0 :             from_to_segno.start(),
     184            0 :             from_to_segno.end(),
     185              :             bctx.term,
     186              :             bctx.last_log_term,
     187              :             bctx.flush_lsn,
     188              :         );
     189            0 :         for segno in from_to_segno.clone() {
     190            0 :             let Some((mut sf, is_partial)) =
     191            0 :                 open_wal_file(&tli_dir, segno, bctx.wal_seg_size).await?
     192              :             else {
     193              :                 // File is not found
     194            0 :                 let (wal_file_path, _wal_file_partial_path) =
     195            0 :                     wal_file_paths(&tli_dir, segno, bctx.wal_seg_size);
     196            0 :                 tracing::warn!("couldn't find WAL segment file {wal_file_path}");
     197            0 :                 bail!("couldn't find WAL segment file {wal_file_path}")
     198              :             };
     199            0 :             let mut wal_file_name = XLogFileName(PG_TLI, segno, bctx.wal_seg_size);
     200            0 :             if is_partial {
     201            0 :                 wal_file_name.push_str(".partial");
     202            0 :             }
     203            0 :             ar.append_file(&wal_file_name, &mut sf).await?;
     204              :         }
     205              :     } else {
     206            0 :         info!("Not including any segments into the snapshot");
     207              :     }
     208              : 
      209              :     // Do the term check before ar.finish so the archive is corrupted in case of a term
      210              :     // change. The client shouldn't ignore an abrupt stream end, but this is an extra safeguard.
     211            0 :     tli.finish_snapshot(&bctx).await?;
     212              : 
     213            0 :     ar.finish().await?;
     214              : 
     215            0 :     Ok(())
     216            0 : }
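
// A hedged illustration of how the segment loop above maps LSNs to the WAL file names
// it appends. segment_number and XLogFileName are the same helpers used above; the
// 16 MiB segment size and the LSN values are made up for the example.
fn segment_name_example() {
    let wal_seg_size: usize = 16 * 1024 * 1024;
    let from_lsn = Lsn(0x0100_0000); // start of segment 1
    let flush_lsn = Lsn(0x0250_0000); // somewhere inside segment 2

    let from_segno = from_lsn.segment_number(wal_seg_size);
    let upto_segno = flush_lsn.segment_number(wal_seg_size);
    for segno in from_segno..=upto_segno {
        // Prints "000000010000000000000001" and "000000010000000000000002";
        // a ".partial" suffix is appended when the on-disk segment is partial.
        println!("{}", XLogFileName(PG_TLI, segno, wal_seg_size));
    }
}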
     217              : 
     218              : impl Timeline {
      219              :     /// Simple snapshot for an offloaded timeline: we only upload a renamed partial segment and
      220              :     /// pass a modified control file into the provided tar stream (no data segments are included,
      221              :     /// since the timeline is offloaded and there aren't any on disk).
     222            0 :     async fn snapshot_offloaded<W: AsyncWrite + Unpin + Send>(
     223            0 :         self: &Arc<Timeline>,
     224            0 :         ar: &mut tokio_tar::Builder<W>,
     225            0 :         source: NodeId,
     226            0 :         destination: NodeId,
     227            0 :         storage: &GenericRemoteStorage,
     228            0 :     ) -> Result<()> {
     229              :         // Take initial copy of control file, then release state lock
     230            0 :         let mut control_file = {
     231            0 :             let shared_state = self.write_shared_state().await;
     232              : 
     233            0 :             let control_file = TimelinePersistentState::clone(shared_state.sk.state());
     234              : 
      235              :             // Rare race: we got unevicted between entering the function and reading the control file.
      236              :             // We error out and let the API caller retry.
     237            0 :             if !matches!(control_file.eviction_state, EvictionState::Offloaded(_)) {
     238            0 :                 bail!("Timeline was un-evicted during snapshot, please retry");
     239            0 :             }
     240              : 
     241            0 :             control_file
     242              :         };
     243              : 
     244              :         // Modify the partial segment of the in-memory copy for the control file to
     245              :         // point to the destination safekeeper.
     246            0 :         let replace = control_file
     247            0 :             .partial_backup
     248            0 :             .replace_uploaded_segment(source, destination)?;
     249              : 
     250            0 :         let Some(replace) = replace else {
      251              :             // In Manager::ready_for_eviction, we do not permit eviction unless the timeline
      252              :             // has a partial segment. It is unexpected for an offloaded timeline to lack one.
     253            0 :             anyhow::bail!("Timeline has no partial segment, cannot generate snapshot");
     254              :         };
     255              : 
     256            0 :         tracing::info!("Replacing uploaded partial segment in in-mem control file: {replace:?}");
     257              : 
     258              :         // Optimistically try to copy the partial segment to the destination's path: this
     259              :         // can fail if the timeline was un-evicted and modified in the background.
     260            0 :         let remote_timeline_path = &self.remote_path;
     261            0 :         wal_backup::copy_partial_segment(
     262            0 :             storage,
     263            0 :             &replace.previous.remote_path(remote_timeline_path),
     264            0 :             &replace.current.remote_path(remote_timeline_path),
     265            0 :         )
     266            0 :         .await?;
     267              : 
     268              :         // Since the S3 copy succeeded with the path given in our control file snapshot, and
     269              :         // we are sending that snapshot in our response, we are giving the caller a consistent
     270              :         // snapshot even if our local Timeline was unevicted or otherwise modified in the meantime.
     271            0 :         let buf = control_file
     272            0 :             .write_to_buf()
     273            0 :             .with_context(|| "failed to serialize control store")?;
     274            0 :         let mut header = Header::new_gnu();
     275            0 :         header.set_size(buf.len().try_into().expect("never breaches u64"));
     276            0 :         ar.append_data(&mut header, CONTROL_FILE_NAME, buf.as_slice())
     277            0 :             .await
     278            0 :             .with_context(|| "failed to append to archive")?;
     279              : 
     280            0 :         Ok(())
     281            0 :     }
     282              : }
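
// A minimal sketch of the in-memory append pattern used above (and again in
// start_snapshot): build a GNU header sized to the buffer and stream the bytes into the
// tar builder. The entry name is a parameter here purely for illustration.
async fn append_buf_to_tar<W: AsyncWrite + Unpin + Send>(
    ar: &mut Builder<W>,
    name: &str,
    buf: &[u8],
) -> Result<()> {
    let mut header = Header::new_gnu();
    header.set_size(buf.len() as u64);
    // append_data writes the header followed by the payload read from `buf`.
    ar.append_data(&mut header, name, buf).await?;
    Ok(())
}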
     283              : 
     284              : impl WalResidentTimeline {
     285              :     /// Start streaming tar archive with timeline:
     286              :     /// 1) stream control file under lock;
     287              :     /// 2) hold off WAL removal;
     288              :     /// 3) collect SnapshotContext to understand which WAL segments should be
     289              :     ///    streamed.
     290              :     ///
      291              :     /// Snapshot streams data up to flush_lsn. To make this safe, we must check
      292              :     /// that the term doesn't change during the procedure, or we risk sending a mix
      293              :     /// of WAL from different histories. The term is remembered in the SnapshotContext
      294              :     /// and checked in finish_snapshot. Note that in the last segment some WAL
      295              :     /// higher than the flush_lsn set here might be streamed; that's fine as long as
      296              :     /// the term doesn't change.
     297              :     ///
     298              :     /// Alternatively we could send only up to commit_lsn to get some valid
      299              :     /// state which later will be recovered by compute; in that case the term check
      300              :     /// is not needed, but we likely don't want that as there might be no
     301              :     /// compute which could perform the recovery.
     302              :     ///
     303              :     /// When returned SnapshotContext is dropped WAL hold is removed.
     304            0 :     async fn start_snapshot<W: AsyncWrite + Unpin + Send>(
     305            0 :         &self,
     306            0 :         ar: &mut tokio_tar::Builder<W>,
     307            0 :         source: NodeId,
     308            0 :         destination: NodeId,
     309            0 :         storage: Option<Arc<GenericRemoteStorage>>,
     310            0 :     ) -> Result<SnapshotContext> {
     311            0 :         let mut shared_state = self.write_shared_state().await;
     312            0 :         let wal_seg_size = shared_state.get_wal_seg_size();
     313              : 
     314            0 :         let mut control_store = TimelinePersistentState::clone(shared_state.sk.state());
     315              :         // Modify the partial segment of the in-memory copy for the control file to
     316              :         // point to the destination safekeeper.
     317            0 :         let replace = control_store
     318            0 :             .partial_backup
     319            0 :             .replace_uploaded_segment(source, destination)?;
     320              : 
     321            0 :         if let Some(replace) = replace {
     322              :             // The deserialized control file has an uploaded partial. We upload a copy
     323              :             // of it to object storage for the destination safekeeper and send an updated
     324              :             // control file in the snapshot.
     325            0 :             tracing::info!(
     326            0 :                 "Replacing uploaded partial segment in in-mem control file: {replace:?}"
     327              :             );
     328              : 
     329            0 :             let remote_timeline_path = &self.tli.remote_path;
     330            0 :             wal_backup::copy_partial_segment(
     331            0 :                 &*storage.context("remote storage not configured")?,
     332            0 :                 &replace.previous.remote_path(remote_timeline_path),
     333            0 :                 &replace.current.remote_path(remote_timeline_path),
     334              :             )
     335            0 :             .await?;
     336            0 :         }
     337              : 
     338            0 :         let buf = control_store
     339            0 :             .write_to_buf()
     340            0 :             .with_context(|| "failed to serialize control store")?;
     341            0 :         let mut header = Header::new_gnu();
     342            0 :         header.set_size(buf.len().try_into().expect("never breaches u64"));
     343            0 :         ar.append_data(&mut header, CONTROL_FILE_NAME, buf.as_slice())
     344            0 :             .await
     345            0 :             .with_context(|| "failed to append to archive")?;
     346              : 
      347              :         // We need to stream starting from the oldest segment that someone (s3 or
      348              :         // pageserver) still needs. This duplicates calc_horizon_lsn logic.
     349              :         //
     350              :         // We know that WAL wasn't removed up to this point because it cannot be
      351              :         // removed further than `backup_lsn`. Since we're holding the shared_state
      352              :         // lock and set `wal_removal_on_hold` below, WAL is guaranteed not to be
      353              :         // removed until we're done.
     354            0 :         let timeline_state = shared_state.sk.state();
     355            0 :         let from_lsn = min(
     356            0 :             timeline_state.remote_consistent_lsn,
     357            0 :             timeline_state.backup_lsn,
     358              :         );
     359            0 :         let flush_lsn = shared_state.sk.flush_lsn();
     360            0 :         let (send_segments, msg) = if from_lsn == Lsn::INVALID {
     361            0 :             (false, "snapshot is called on uninitialized timeline")
     362              :         } else {
     363            0 :             (true, "timeline is initialized")
     364              :         };
     365            0 :         tracing::info!(
     366            0 :             remote_consistent_lsn=%timeline_state.remote_consistent_lsn,
     367            0 :             backup_lsn=%timeline_state.backup_lsn,
     368              :             %flush_lsn,
     369            0 :             "{msg}"
     370              :         );
     371            0 :         let from_segno = from_lsn.segment_number(wal_seg_size);
     372            0 :         let term = shared_state.sk.state().acceptor_state.term;
     373            0 :         let last_log_term = shared_state.sk.last_log_term();
     374            0 :         let upto_segno = flush_lsn.segment_number(wal_seg_size);
      375              :         // Have some limit on the max number of segments as a sanity check.
     376              :         const MAX_ALLOWED_SEGS: u64 = 1000;
     377            0 :         let num_segs = upto_segno - from_segno + 1;
     378            0 :         if num_segs > MAX_ALLOWED_SEGS {
     379            0 :             bail!(
     380            0 :                 "snapshot is called on timeline with {} segments, but the limit is {}",
     381              :                 num_segs,
     382              :                 MAX_ALLOWED_SEGS
     383              :             );
     384            0 :         }
     385              : 
     386              :         // Prevent WAL removal while we're streaming data.
     387              :         //
      388              :         // Since this is a flag, not a counter, just bail out if already set; we
     389              :         // shouldn't need concurrent snapshotting.
     390            0 :         if shared_state.wal_removal_on_hold {
     391            0 :             bail!("wal_removal_on_hold is already true");
     392            0 :         }
     393            0 :         shared_state.wal_removal_on_hold = true;
     394              : 
     395              :         // Drop shared_state to release the lock, before calling wal_residence_guard().
     396            0 :         drop(shared_state);
     397              : 
     398            0 :         let tli_copy = self.wal_residence_guard().await?;
     399            0 :         let from_to_segno = send_segments.then_some(from_segno..=upto_segno);
     400            0 :         let bctx = SnapshotContext {
     401            0 :             from_to_segno,
     402            0 :             term,
     403            0 :             last_log_term,
     404            0 :             flush_lsn,
     405            0 :             wal_seg_size,
     406            0 :             tli: tli_copy,
     407            0 :         };
     408              : 
     409            0 :         Ok(bctx)
     410            0 :     }
     411              : 
     412              :     /// Finish snapshotting: check that term(s) hasn't changed.
     413              :     ///
      414              :     /// Note that the WAL gc hold-off is removed in SnapshotContext's Drop so it isn't
      415              :     /// forgotten if snapshotting fails midway.
     416            0 :     pub async fn finish_snapshot(&self, bctx: &SnapshotContext) -> Result<()> {
     417            0 :         let shared_state = self.read_shared_state().await;
     418            0 :         let term = shared_state.sk.state().acceptor_state.term;
     419            0 :         let last_log_term = shared_state.sk.last_log_term();
      420              :         // There are cases where this check could be relaxed (e.g. last_log_term might
      421              :         // change, but as long as the older history is strictly part of the new one
      422              :         // that's fine), but there is no need to do so.
     423            0 :         if bctx.term != term || bctx.last_log_term != last_log_term {
     424            0 :             bail!(
     425            0 :                 "term(s) changed during snapshot: were term={}, last_log_term={}, now term={}, last_log_term={}",
     426              :                 bctx.term,
     427              :                 bctx.last_log_term,
     428              :                 term,
     429              :                 last_log_term
     430              :             );
     431            0 :         }
     432            0 :         Ok(())
     433            0 :     }
     434              : }
     435              : 
     436              : /// Response for debug dump request.
     437            0 : #[derive(Debug, Deserialize)]
     438              : pub struct DebugDumpResponse {
     439              :     pub start_time: DateTime<Utc>,
     440              :     pub finish_time: DateTime<Utc>,
     441              :     pub timelines: Vec<debug_dump::Timeline>,
     442              :     pub timelines_count: usize,
     443              :     pub config: debug_dump::Config,
     444              : }
     445              : 
     446              : /// Find the most advanced safekeeper and pull timeline from it.
     447            0 : pub async fn handle_request(
     448            0 :     request: PullTimelineRequest,
     449            0 :     sk_auth_token: Option<SecretString>,
     450            0 :     ssl_ca_certs: Vec<Certificate>,
     451            0 :     global_timelines: Arc<GlobalTimelines>,
     452            0 : ) -> Result<PullTimelineResponse, ApiError> {
     453            0 :     let existing_tli = global_timelines.get(TenantTimelineId::new(
     454            0 :         request.tenant_id,
     455            0 :         request.timeline_id,
     456            0 :     ));
     457            0 :     if existing_tli.is_ok() {
     458            0 :         info!("Timeline {} already exists", request.timeline_id);
     459            0 :         return Ok(PullTimelineResponse {
     460            0 :             safekeeper_host: None,
     461            0 :         });
     462            0 :     }
     463              : 
     464            0 :     let mut http_client = reqwest::Client::builder();
     465            0 :     for ssl_ca_cert in ssl_ca_certs {
     466            0 :         http_client = http_client.add_root_certificate(ssl_ca_cert);
     467            0 :     }
     468            0 :     let http_client = http_client
     469            0 :         .build()
     470            0 :         .map_err(|e| ApiError::InternalServerError(e.into()))?;
     471              : 
     472            0 :     let http_hosts = request.http_hosts.clone();
     473              : 
     474              :     // Figure out statuses of potential donors.
     475            0 :     let responses: Vec<Result<TimelineStatus, mgmt_api::Error>> =
     476            0 :         futures::future::join_all(http_hosts.iter().map(|url| async {
     477            0 :             let cclient = Client::new(http_client.clone(), url.clone(), sk_auth_token.clone());
     478            0 :             let info = cclient
     479            0 :                 .timeline_status(request.tenant_id, request.timeline_id)
     480            0 :                 .await?;
     481            0 :             Ok(info)
     482            0 :         }))
     483            0 :         .await;
     484              : 
     485            0 :     let mut statuses = Vec::new();
     486            0 :     for (i, response) in responses.into_iter().enumerate() {
     487            0 :         match response {
     488            0 :             Ok(status) => {
     489            0 :                 statuses.push((status, i));
     490            0 :             }
     491            0 :             Err(e) => {
     492            0 :                 info!("error fetching status from {}: {e}", http_hosts[i]);
     493              :             }
     494              :         }
     495              :     }
     496              : 
      497              :     // Allow missing responses from up to one safekeeper (say, due to downtime),
      498              :     // e.g. if we created a timeline on safekeepers A and B, with C being offline. Then B goes
      499              :     // offline and C comes online; we still want a pull on C with A and B as hosts to work.
     500            0 :     let min_required_successful = (http_hosts.len() - 1).max(1);
     501            0 :     if statuses.len() < min_required_successful {
     502            0 :         return Err(ApiError::InternalServerError(anyhow::anyhow!(
     503            0 :             "only got {} successful status responses. required: {min_required_successful}",
     504            0 :             statuses.len()
     505            0 :         )));
     506            0 :     }
     507              : 
     508              :     // Find the most advanced safekeeper
     509            0 :     let (status, i) = statuses
     510            0 :         .into_iter()
     511            0 :         .max_by_key(|(status, _)| {
     512            0 :             (
     513            0 :                 status.acceptor_state.epoch,
     514            0 :                 status.flush_lsn,
     515            0 :                 status.commit_lsn,
     516            0 :             )
     517            0 :         })
     518            0 :         .unwrap();
     519            0 :     let safekeeper_host = http_hosts[i].clone();
     520              : 
     521            0 :     assert!(status.tenant_id == request.tenant_id);
     522            0 :     assert!(status.timeline_id == request.timeline_id);
     523              : 
     524            0 :     let check_tombstone = !request.ignore_tombstone.unwrap_or_default();
     525              : 
     526            0 :     match pull_timeline(
     527            0 :         status,
     528            0 :         safekeeper_host,
     529            0 :         sk_auth_token,
     530            0 :         http_client,
     531            0 :         global_timelines,
     532            0 :         check_tombstone,
     533              :     )
     534            0 :     .await
     535              :     {
     536            0 :         Ok(resp) => Ok(resp),
     537            0 :         Err(e) => {
     538            0 :             match e.downcast_ref::<TimelineError>() {
     539            0 :                 Some(TimelineError::AlreadyExists(_)) => Ok(PullTimelineResponse {
     540            0 :                     safekeeper_host: None,
     541            0 :                 }),
     542              :                 Some(TimelineError::CreationInProgress(_)) => {
     543              :                     // We don't return success here because creation might still fail.
     544            0 :                     Err(ApiError::Conflict("Creation in progress".to_owned()))
     545              :                 }
     546            0 :                 _ => Err(ApiError::InternalServerError(e)),
     547              :             }
     548              :         }
     549              :     }
     550            0 : }
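
// A hedged illustration of the donor-selection rule above: max_by_key on a tuple
// compares lexicographically, so the highest epoch wins first, then flush_lsn, then
// commit_lsn. Hosts and values here are made up.
fn pick_most_advanced_example() {
    let statuses = [
        ("sk-a", (3u64, 0x30u64, 0x28u64)),
        ("sk-b", (4u64, 0x10u64, 0x08u64)), // higher epoch beats larger LSNs
        ("sk-c", (4u64, 0x20u64, 0x18u64)),
    ];
    let (host, _) = statuses
        .iter()
        .max_by_key(|(_, key)| *key)
        .expect("at least one status");
    assert_eq!(*host, "sk-c");
}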
     551              : 
     552            0 : async fn pull_timeline(
     553            0 :     status: TimelineStatus,
     554            0 :     host: String,
     555            0 :     sk_auth_token: Option<SecretString>,
     556            0 :     http_client: reqwest::Client,
     557            0 :     global_timelines: Arc<GlobalTimelines>,
     558            0 :     check_tombstone: bool,
     559            0 : ) -> Result<PullTimelineResponse> {
     560            0 :     let ttid = TenantTimelineId::new(status.tenant_id, status.timeline_id);
     561            0 :     info!(
     562            0 :         "pulling timeline {} from safekeeper {}, commit_lsn={}, flush_lsn={}, term={}, epoch={}",
     563              :         ttid,
     564              :         host,
     565              :         status.commit_lsn,
     566              :         status.flush_lsn,
     567              :         status.acceptor_state.term,
     568              :         status.acceptor_state.epoch
     569              :     );
     570              : 
     571            0 :     let conf = &global_timelines.get_global_config();
     572              : 
     573            0 :     let (_tmp_dir, tli_dir_path) = create_temp_timeline_dir(conf, ttid).await?;
     574            0 :     let client = Client::new(http_client, host.clone(), sk_auth_token.clone());
     575              :     // Request stream with basebackup archive.
     576            0 :     let bb_resp = client
     577            0 :         .snapshot(status.tenant_id, status.timeline_id, conf.my_id)
     578            0 :         .await?;
     579              : 
     580              :     // Make Stream of Bytes from it...
     581            0 :     let bb_stream = bb_resp.bytes_stream().map_err(std::io::Error::other);
     582              :     // and turn it into StreamReader implementing AsyncRead.
     583            0 :     let bb_reader = tokio_util::io::StreamReader::new(bb_stream);
     584              : 
      585              :     // Extract it on the fly to disk. We don't use the simple unpack() so that we
      586              :     // can fsync each file.
     587            0 :     let mut entries = Archive::new(bb_reader).entries()?;
     588            0 :     while let Some(base_tar_entry) = entries.next().await {
     589            0 :         let mut entry = base_tar_entry?;
     590            0 :         let header = entry.header();
     591            0 :         let file_path = header.path()?.into_owned();
     592            0 :         match header.entry_type() {
     593              :             tokio_tar::EntryType::Regular => {
     594            0 :                 let utf8_file_path =
     595            0 :                     Utf8PathBuf::from_path_buf(file_path).expect("non-Unicode path");
     596            0 :                 let dst_path = tli_dir_path.join(utf8_file_path);
     597            0 :                 let mut f = OpenOptions::new()
     598            0 :                     .create(true)
     599            0 :                     .truncate(true)
     600            0 :                     .write(true)
     601            0 :                     .open(&dst_path)
     602            0 :                     .await?;
     603            0 :                 tokio::io::copy(&mut entry, &mut f).await?;
     604              :                 // fsync the file
     605            0 :                 f.sync_all().await?;
     606              :             }
     607              :             _ => {
     608            0 :                 bail!(
     609            0 :                     "entry {} in backup tar archive is of unexpected type: {:?}",
     610            0 :                     file_path.display(),
     611            0 :                     header.entry_type()
     612              :                 );
     613              :             }
     614              :         }
     615              :     }
     616              : 
     617              :     // fsync temp timeline directory to remember its contents.
     618            0 :     fsync_async_opt(&tli_dir_path, !conf.no_sync).await?;
     619              : 
     620              :     // Let's create timeline from temp directory and verify that it's correct
     621            0 :     let (commit_lsn, flush_lsn) = validate_temp_timeline(conf, ttid, &tli_dir_path).await?;
     622            0 :     info!(
     623            0 :         "finished downloading timeline {}, commit_lsn={}, flush_lsn={}",
     624              :         ttid, commit_lsn, flush_lsn
     625              :     );
     626            0 :     assert!(status.commit_lsn <= status.flush_lsn);
     627              : 
     628              :     // Finally, load the timeline.
     629            0 :     let _tli = global_timelines
     630            0 :         .load_temp_timeline(ttid, &tli_dir_path, check_tombstone)
     631            0 :         .await?;
     632              : 
     633            0 :     Ok(PullTimelineResponse {
     634            0 :         safekeeper_host: Some(host),
     635            0 :     })
     636            0 : }
        

Generated by: LCOV version 2.1-beta