LCOV - code coverage report
Current view: top level - safekeeper/src - wal_backup.rs (source / functions)
Test: 53437f7e869ac68c86c7d3e4c20964c0156f158c.info
Test Date: 2024-09-20 16:14:12
Coverage: Lines: 0.0 % (0 of 428 hit), Functions: 0.0 % (0 of 59 hit)

use anyhow::{Context, Result};

use camino::{Utf8Path, Utf8PathBuf};
use futures::stream::FuturesOrdered;
use futures::StreamExt;
use tokio::task::JoinHandle;
use tokio_util::sync::CancellationToken;
use utils::backoff;
use utils::id::NodeId;

use std::cmp::min;
use std::collections::HashSet;
use std::num::NonZeroU32;
use std::pin::Pin;
use std::time::Duration;

use postgres_ffi::v14::xlog_utils::XLogSegNoOffsetToRecPtr;
use postgres_ffi::XLogFileName;
use postgres_ffi::{XLogSegNo, PG_TLI};
use remote_storage::{GenericRemoteStorage, ListingMode, RemotePath, StorageMetadata};
use tokio::fs::File;

use tokio::select;
use tokio::sync::mpsc::{self, Receiver, Sender};
use tokio::sync::{watch, OnceCell};
use tokio::time::sleep;
use tracing::*;

use utils::{id::TenantTimelineId, lsn::Lsn};

use crate::metrics::{BACKED_UP_SEGMENTS, BACKUP_ERRORS, WAL_BACKUP_TASKS};
use crate::timeline::{PeerInfo, WalResidentTimeline};
use crate::timeline_manager::{Manager, StateSnapshot};
use crate::{SafeKeeperConf, WAL_BACKUP_RUNTIME};

const UPLOAD_FAILURE_RETRY_MIN_MS: u64 = 10;
const UPLOAD_FAILURE_RETRY_MAX_MS: u64 = 5000;
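
// Illustrative note (derived from `WalBackupTask::run` below): after the n-th
// consecutive upload failure the task sleeps for
// min(UPLOAD_FAILURE_RETRY_MAX_MS, UPLOAD_FAILURE_RETRY_MIN_MS << n) ms,
// i.e. 20 ms, 40 ms, 80 ms, ..., saturating at the 5000 ms cap from the
// 9th failure on (10 << 9 = 5120 > 5000).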

/// Default buffer size when interfacing with [`tokio::fs::File`].
const BUFFER_SIZE: usize = 32 * 1024;

pub struct WalBackupTaskHandle {
    shutdown_tx: Sender<()>,
    handle: JoinHandle<()>,
}

/// Do we have anything to upload to S3, i.e. should safekeepers run backup activity?
pub(crate) fn is_wal_backup_required(
    wal_seg_size: usize,
    num_computes: usize,
    state: &StateSnapshot,
) -> bool {
    num_computes > 0 ||
    // Currently only the whole segment is offloaded, so compare segment numbers.
    (state.commit_lsn.segment_number(wal_seg_size) > state.backup_lsn.segment_number(wal_seg_size))
}
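
// Illustrative example (hypothetical values, assuming the common 16 MiB WAL
// segment size): with commit_lsn = 0x0200_0001 (segment 2) and
// backup_lsn = 0x01ff_ffff (segment 1), the function returns true even when
// num_computes == 0, because a whole segment is ready to be offloaded.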

/// Based on peer information determine which safekeeper should offload; if it
/// is me, run the (per timeline) task unless it is already running. On the
/// other hand, if it is not me and the task is running, kill it.
pub(crate) async fn update_task(mgr: &mut Manager, need_backup: bool, state: &StateSnapshot) {
    let (offloader, election_dbg_str) =
        determine_offloader(&state.peers, state.backup_lsn, mgr.tli.ttid, &mgr.conf);
    let elected_me = Some(mgr.conf.my_id) == offloader;

    let should_task_run = need_backup && elected_me;

    // start or stop the task
    if should_task_run != (mgr.backup_task.is_some()) {
        if should_task_run {
            info!("elected for backup: {}", election_dbg_str);

            let (shutdown_tx, shutdown_rx) = mpsc::channel(1);

            let async_task = backup_task_main(
                mgr.wal_resident_timeline(),
                mgr.conf.backup_parallel_jobs,
                shutdown_rx,
            );

            let handle = if mgr.conf.current_thread_runtime {
                tokio::spawn(async_task)
            } else {
                WAL_BACKUP_RUNTIME.spawn(async_task)
            };

            mgr.backup_task = Some(WalBackupTaskHandle {
                shutdown_tx,
                handle,
            });
        } else {
            if !need_backup {
                // don't need backup at all
                info!("stepping down from backup, need_backup={}", need_backup);
            } else {
                // someone else has been elected
                info!("stepping down from backup: {}", election_dbg_str);
            }
            shut_down_task(&mut mgr.backup_task).await;
        }
    }
}

async fn shut_down_task(entry: &mut Option<WalBackupTaskHandle>) {
    if let Some(wb_handle) = entry.take() {
        // Tell the task to shut down. An error means the task exited earlier, which is ok.
        let _ = wb_handle.shutdown_tx.send(()).await;
        // Await the task itself. TODO: restart panicked tasks earlier.
        if let Err(e) = wb_handle.handle.await {
            warn!("WAL backup task panicked: {}", e);
        }
    }
}

/// The goal is to ensure that normally only one safekeeper offloads. However,
/// it is fine (and inevitable, as s3 doesn't provide CAS) that for some short
/// time we have several of them PUTting the same files. Also,
/// - frequently changing the offloader would be bad;
/// - electing a seriously lagging safekeeper is undesirable;
///
/// So we deterministically choose among the reasonably caught up candidates.
/// TODO: take into account failed attempts to deal with the hypothetical
/// situation where s3 is unreachable only for some sks.
fn determine_offloader(
    alive_peers: &[PeerInfo],
    wal_backup_lsn: Lsn,
    ttid: TenantTimelineId,
    conf: &SafeKeeperConf,
) -> (Option<NodeId>, String) {
    // TODO: remove this once we fill newly joined safekeepers since backup_lsn.
    let capable_peers = alive_peers
        .iter()
        .filter(|p| p.local_start_lsn <= wal_backup_lsn);
    match capable_peers.clone().map(|p| p.commit_lsn).max() {
        None => (None, "no connected peers to elect from".to_string()),
        Some(max_commit_lsn) => {
            let threshold = max_commit_lsn
                .checked_sub(conf.max_offloader_lag_bytes)
                .unwrap_or(Lsn(0));
            let mut caughtup_peers = capable_peers
                .clone()
                .filter(|p| p.commit_lsn >= threshold)
                .collect::<Vec<_>>();
            caughtup_peers.sort_by(|p1, p2| p1.sk_id.cmp(&p2.sk_id));

            // To distribute the load, shift by timeline_id.
            let offloader = caughtup_peers
                [(u128::from(ttid.timeline_id) % caughtup_peers.len() as u128) as usize]
                .sk_id;

            let mut capable_peers_dbg = capable_peers
                .map(|p| (p.sk_id, p.commit_lsn))
                .collect::<Vec<_>>();
            capable_peers_dbg.sort_by(|p1, p2| p1.0.cmp(&p2.0));
            (
                Some(offloader),
                format!(
                    "elected {} among {:?} peers, with {} of them being caughtup",
                    offloader,
                    capable_peers_dbg,
                    caughtup_peers.len()
                ),
            )
        }
    }
}
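
// Worked example (hypothetical values): if the caught-up peers, sorted by
// sk_id, are [NodeId(1), NodeId(4), NodeId(9)] and
// u128::from(ttid.timeline_id) % 3 == 2, then NodeId(9) is elected. All
// safekeepers evaluate the same peer data, so they normally agree on the
// offloader without explicit coordination.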

static REMOTE_STORAGE: OnceCell<Option<GenericRemoteStorage>> = OnceCell::const_new();

// Storage must be configured and initialized when this is called.
fn get_configured_remote_storage() -> &'static GenericRemoteStorage {
    REMOTE_STORAGE
        .get()
        .expect("failed to get remote storage")
        .as_ref()
        .unwrap()
}

pub async fn init_remote_storage(conf: &SafeKeeperConf) {
    // TODO: refactor REMOTE_STORAGE to avoid using global variables, and provide
    // dependencies to all tasks instead.
    REMOTE_STORAGE
        .get_or_init(|| async {
            if let Some(conf) = conf.remote_storage.as_ref() {
                Some(
                    GenericRemoteStorage::from_config(conf)
                        .await
                        .expect("failed to create remote storage"),
                )
            } else {
                None
            }
        })
        .await;
}

struct WalBackupTask {
    timeline: WalResidentTimeline,
    timeline_dir: Utf8PathBuf,
    wal_seg_size: usize,
    parallel_jobs: usize,
    commit_lsn_watch_rx: watch::Receiver<Lsn>,
}

/// Offload a single timeline.
#[instrument(name = "wal_backup", skip_all, fields(ttid = %tli.ttid))]
async fn backup_task_main(
    tli: WalResidentTimeline,
    parallel_jobs: usize,
    mut shutdown_rx: Receiver<()>,
) {
    let _guard = WAL_BACKUP_TASKS.guard();
    info!("started");

    let mut wb = WalBackupTask {
        wal_seg_size: tli.get_wal_seg_size().await,
        commit_lsn_watch_rx: tli.get_commit_lsn_watch_rx(),
        timeline_dir: tli.get_timeline_dir(),
        timeline: tli,
        parallel_jobs,
    };

    // the task is spun up only when wal_seg_size is already initialized
    assert!(wb.wal_seg_size > 0);

    let mut canceled = false;
    select! {
        _ = wb.run() => {}
        _ = shutdown_rx.recv() => {
            canceled = true;
        }
    }
    info!("task {}", if canceled { "canceled" } else { "terminated" });
}

impl WalBackupTask {
    async fn run(&mut self) {
        let mut backup_lsn = Lsn(0);

        let mut retry_attempt = 0u32;
        // offload loop
        loop {
            if retry_attempt == 0 {
                // wait for new WAL to arrive
                if let Err(e) = self.commit_lsn_watch_rx.changed().await {
                    // should never happen, as we hold Arc to timeline.
                    error!("commit_lsn watch shut down: {:?}", e);
                    return;
                }
            } else {
                // or just sleep if we errored previously
                let mut retry_delay = UPLOAD_FAILURE_RETRY_MAX_MS;
                if let Some(backoff_delay) = UPLOAD_FAILURE_RETRY_MIN_MS.checked_shl(retry_attempt)
                {
                    retry_delay = min(retry_delay, backoff_delay);
                }
                sleep(Duration::from_millis(retry_delay)).await;
            }

            let commit_lsn = *self.commit_lsn_watch_rx.borrow();

            // Note that backup_lsn can be higher than commit_lsn if we
            // don't have much local WAL and others already uploaded
            // segments we don't even have.
            if backup_lsn.segment_number(self.wal_seg_size)
                >= commit_lsn.segment_number(self.wal_seg_size)
            {
                retry_attempt = 0;
                continue; /* nothing to do, common case as we wake up on every commit_lsn bump */
            }
            // Perhaps peers advanced the position, check shmem value.
            backup_lsn = self.timeline.get_wal_backup_lsn().await;
            if backup_lsn.segment_number(self.wal_seg_size)
                >= commit_lsn.segment_number(self.wal_seg_size)
            {
                retry_attempt = 0;
                continue;
            }

            match backup_lsn_range(
                &self.timeline,
                &mut backup_lsn,
                commit_lsn,
                self.wal_seg_size,
                &self.timeline_dir,
                self.parallel_jobs,
            )
            .await
            {
                Ok(()) => {
                    retry_attempt = 0;
                }
                Err(e) => {
                    error!(
                        "failed while offloading range {}-{}: {:?}",
                        backup_lsn, commit_lsn, e
                    );

                    retry_attempt = retry_attempt.saturating_add(1);
                }
            }
        }
    }
}

async fn backup_lsn_range(
    timeline: &WalResidentTimeline,
    backup_lsn: &mut Lsn,
    end_lsn: Lsn,
    wal_seg_size: usize,
    timeline_dir: &Utf8Path,
    parallel_jobs: usize,
) -> Result<()> {
    if parallel_jobs < 1 {
        anyhow::bail!("parallel_jobs must be >= 1");
    }

    let remote_timeline_path = &timeline.remote_path;
    let start_lsn = *backup_lsn;
    let segments = get_segments(start_lsn, end_lsn, wal_seg_size);

    // Pool of concurrent upload tasks. We use `FuturesOrdered` to
    // preserve order of uploads, and update `backup_lsn` only after
    // all previous uploads are finished.
    let mut uploads = FuturesOrdered::new();
    let mut iter = segments.iter();

    loop {
        let added_task = match iter.next() {
            Some(s) => {
                uploads.push_back(backup_single_segment(s, timeline_dir, remote_timeline_path));
                true
            }
            None => false,
        };

        // Wait for the next upload to finish if there are no more segments to
        // add, or if we have too many concurrent uploads.
        if !added_task || uploads.len() >= parallel_jobs {
            let next = uploads.next().await;
            if let Some(res) = next {
                // next segment uploaded
                let segment = res?;
                let new_backup_lsn = segment.end_lsn;
                timeline
                    .set_wal_backup_lsn(new_backup_lsn)
                    .await
                    .context("setting wal_backup_lsn")?;
                *backup_lsn = new_backup_lsn;
            } else {
                // no more segments to upload
                break;
            }
        }
    }

    info!(
        "offloaded segnos {:?} up to {}, previous backup_lsn {}",
        segments.iter().map(|&s| s.seg_no).collect::<Vec<_>>(),
        end_lsn,
        start_lsn,
    );
    Ok(())
}
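
// Minimal sketch of the bounded-parallelism pattern used by `backup_lsn_range`
// above (a hypothetical test, not part of this module's API; assumes tokio's
// test macro is available in dev-dependencies): at most `parallel_jobs`
// futures are in flight, and results are consumed strictly in submission
// order, which is what makes it safe to advance `backup_lsn` after each
// completed upload.
#[cfg(test)]
mod ordered_pool_sketch {
    use futures::stream::FuturesOrdered;
    use futures::StreamExt;

    #[tokio::test]
    async fn drains_in_submission_order() {
        let parallel_jobs = 2;
        let mut pool = FuturesOrdered::new();
        let mut finished = Vec::new();
        for i in 0..5u32 {
            pool.push_back(async move { i });
            if pool.len() >= parallel_jobs {
                // mirror the "too many concurrent uploads" branch above
                finished.push(pool.next().await.unwrap());
            }
        }
        while let Some(i) = pool.next().await {
            finished.push(i);
        }
        assert_eq!(finished, vec![0, 1, 2, 3, 4]);
    }
}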

async fn backup_single_segment(
    seg: &Segment,
    timeline_dir: &Utf8Path,
    remote_timeline_path: &RemotePath,
) -> Result<Segment> {
    let segment_file_path = seg.file_path(timeline_dir)?;
    let remote_segment_path = seg.remote_path(remote_timeline_path);

    let res = backup_object(&segment_file_path, &remote_segment_path, seg.size()).await;
    if res.is_ok() {
        BACKED_UP_SEGMENTS.inc();
    } else {
        BACKUP_ERRORS.inc();
    }
    res?;
    debug!("Backup of {} done", segment_file_path);

    Ok(*seg)
}

#[derive(Debug, Copy, Clone)]
pub struct Segment {
    seg_no: XLogSegNo,
    start_lsn: Lsn,
    end_lsn: Lsn,
}

impl Segment {
    pub fn new(seg_no: u64, start_lsn: Lsn, end_lsn: Lsn) -> Self {
        Self {
            seg_no,
            start_lsn,
            end_lsn,
        }
    }

    pub fn object_name(self) -> String {
        XLogFileName(PG_TLI, self.seg_no, self.size())
    }

    pub fn file_path(self, timeline_dir: &Utf8Path) -> Result<Utf8PathBuf> {
        Ok(timeline_dir.join(self.object_name()))
    }

    pub fn remote_path(self, remote_timeline_path: &RemotePath) -> RemotePath {
        remote_timeline_path.join(self.object_name())
    }

    pub fn size(self) -> usize {
        (u64::from(self.end_lsn) - u64::from(self.start_lsn)) as usize
    }
}
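
// Illustrative sketch (hypothetical test; the expected string assumes the
// standard postgres WAL file naming with PG_TLI = 1 and a 16 MiB segment
// size): a segment's object name is the usual 24-hex-digit WAL file name
// built from the timeline and the segment number.
#[cfg(test)]
mod segment_name_sketch {
    use super::*;

    #[test]
    fn object_name_follows_wal_convention() {
        let seg_size: u64 = 16 * 1024 * 1024;
        let seg = Segment::new(5, Lsn(5 * seg_size), Lsn(6 * seg_size));
        assert_eq!(seg.size(), seg_size as usize);
        // timeline 1; segno 5 / 256 = 0; segno 5 % 256 = 5
        assert_eq!(seg.object_name(), "000000010000000000000005");
    }
}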

fn get_segments(start: Lsn, end: Lsn, seg_size: usize) -> Vec<Segment> {
    let first_seg = start.segment_number(seg_size);
    let last_seg = end.segment_number(seg_size);

    let res: Vec<Segment> = (first_seg..last_seg)
        .map(|s| {
            let start_lsn = XLogSegNoOffsetToRecPtr(s, 0, seg_size);
            let end_lsn = XLogSegNoOffsetToRecPtr(s + 1, 0, seg_size);
            Segment::new(s, Lsn::from(start_lsn), Lsn::from(end_lsn))
        })
        .collect();
    res
}
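
// Illustrative sketch (hypothetical test, assuming a 16 MiB segment size and
// the standard XLogSegNoOffsetToRecPtr arithmetic, i.e. segno * seg_size +
// offset): an LSN range is split into the whole segments lying between its
// endpoints' segment numbers.
#[cfg(test)]
mod get_segments_sketch {
    use super::*;

    #[test]
    fn splits_lsn_range_into_whole_segments() {
        let seg_size: usize = 16 * 1024 * 1024;
        // start inside segment 1, end at the start of segment 3
        let segs = get_segments(Lsn(0x0100_0000), Lsn(0x0300_0000), seg_size);
        assert_eq!(segs.len(), 2);
        assert_eq!(segs[0].seg_no, 1);
        assert_eq!(segs[0].start_lsn, Lsn(0x0100_0000));
        assert_eq!(segs[0].end_lsn, Lsn(0x0200_0000));
        assert_eq!(segs[1].seg_no, 2);
    }
}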

async fn backup_object(
    source_file: &Utf8Path,
    target_file: &RemotePath,
    size: usize,
) -> Result<()> {
    let storage = get_configured_remote_storage();

    let file = File::open(&source_file)
        .await
        .with_context(|| format!("Failed to open file {source_file:?} for wal backup"))?;

    let file = tokio_util::io::ReaderStream::with_capacity(file, BUFFER_SIZE);

    let cancel = CancellationToken::new();

    storage
        .upload_storage_object(file, size, target_file, &cancel)
        .await
}

pub(crate) async fn backup_partial_segment(
    source_file: &Utf8Path,
    target_file: &RemotePath,
    size: usize,
) -> Result<()> {
    let storage = get_configured_remote_storage();

    let file = File::open(&source_file)
        .await
        .with_context(|| format!("Failed to open file {source_file:?} for wal backup"))?;

    // limiting the file to read only the first `size` bytes
    let limited_file = tokio::io::AsyncReadExt::take(file, size as u64);

    let file = tokio_util::io::ReaderStream::with_capacity(limited_file, BUFFER_SIZE);

    let cancel = CancellationToken::new();

    storage
        .upload(
            file,
            size,
            target_file,
            Some(StorageMetadata::from([("sk_type", "partial_segment")])),
            &cancel,
        )
        .await
}

pub(crate) async fn copy_partial_segment(
    source: &RemotePath,
    destination: &RemotePath,
) -> Result<()> {
    let storage = get_configured_remote_storage();
    let cancel = CancellationToken::new();

    storage.copy_object(source, destination, &cancel).await
}

pub async fn read_object(
    file_path: &RemotePath,
    offset: u64,
) -> anyhow::Result<Pin<Box<dyn tokio::io::AsyncRead + Send + Sync>>> {
    let storage = REMOTE_STORAGE
        .get()
        .context("Failed to get remote storage")?
        .as_ref()
        .context("No remote storage configured")?;

    info!("segment download about to start from remote path {file_path:?} at offset {offset}");

    let cancel = CancellationToken::new();

    let download = storage
        .download_storage_object(Some((offset, None)), file_path, &cancel)
        .await
        .with_context(|| {
            format!("Failed to open WAL segment download stream for remote path {file_path:?}")
        })?;

    let reader = tokio_util::io::StreamReader::new(download.download_stream);

    let reader = tokio::io::BufReader::with_capacity(BUFFER_SIZE, reader);

    Ok(Box::pin(reader))
}

/// Delete WAL files for the given timeline. Remote storage must be configured
/// when called.
pub async fn delete_timeline(ttid: &TenantTimelineId) -> Result<()> {
    let storage = get_configured_remote_storage();
    let remote_path = remote_timeline_path(ttid)?;

    // see DEFAULT_MAX_KEYS_PER_LIST_RESPONSE
    // const Option unwrap is not stable, otherwise it would be const.
    let batch_size: NonZeroU32 = NonZeroU32::new(1000).unwrap();

    // A backoff::retry is used here for two reasons:
    // - To provide a backoff rather than busy-polling the API on errors
    // - To absorb transient 429/503 conditions without hitting our error
    //   logging path for issues deleting objects.
    //
    // Note: listing segments might take a long time if there are many of them.
    // We don't currently have http request timeouts or cancellation, but if/once
    // we do, listing should get a streaming interface to make progress.

    let cancel = CancellationToken::new(); // not really used
    backoff::retry(
        || async {
            // Do list-delete in batch_size batches to make progress even if there are a lot of files.
            // Alternatively we could make remote storage list return an iterator, but it is more
            // complicated and I'm not sure deleting while iterating is expected in s3.
            loop {
                let files = storage
                    .list(
                        Some(&remote_path),
                        ListingMode::NoDelimiter,
                        Some(batch_size),
                        &cancel,
                    )
                    .await?
                    .keys
                    .into_iter()
                    .map(|o| o.key)
                    .collect::<Vec<_>>();
                if files.is_empty() {
                    return Ok(()); // done
                }
                // (at least) s3 results are sorted, so we can log min/max:
                // "List results are always returned in UTF-8 binary order."
                info!(
                    "deleting batch of {} WAL segments [{}-{}]",
                    files.len(),
                    files.first().unwrap().object_name().unwrap_or(""),
                    files.last().unwrap().object_name().unwrap_or("")
                );
                storage.delete_objects(&files, &cancel).await?;
            }
        },
        // consider TimeoutOrCancel::caused_by_cancel when using cancellation
        |_| false,
        3,
        10,
        "executing WAL segments deletion batch",
        &cancel,
    )
    .await
    .ok_or_else(|| anyhow::anyhow!("canceled"))
    .and_then(|x| x)?;

    Ok(())
}

/// Used by wal_backup_partial.
pub async fn delete_objects(paths: &[RemotePath]) -> Result<()> {
    let cancel = CancellationToken::new(); // not really used
    let storage = get_configured_remote_storage();
    storage.delete_objects(paths, &cancel).await
}

/// Copy segments from one timeline to another. Used in copy_timeline.
pub async fn copy_s3_segments(
    wal_seg_size: usize,
    src_ttid: &TenantTimelineId,
    dst_ttid: &TenantTimelineId,
    from_segment: XLogSegNo,
    to_segment: XLogSegNo,
) -> Result<()> {
    const SEGMENTS_PROGRESS_REPORT_INTERVAL: u64 = 1024;

    let storage = REMOTE_STORAGE
        .get()
        .expect("failed to get remote storage")
        .as_ref()
        .unwrap();

    let remote_dst_path = remote_timeline_path(dst_ttid)?;

    let cancel = CancellationToken::new();

    let files = storage
        .list(
            Some(&remote_dst_path),
            ListingMode::NoDelimiter,
            None,
            &cancel,
        )
        .await?
        .keys;

    let uploaded_segments = &files
        .iter()
        .filter_map(|o| o.key.object_name().map(ToOwned::to_owned))
        .collect::<HashSet<_>>();

    debug!(
        "these segments have already been uploaded: {:?}",
        uploaded_segments
    );

    for segno in from_segment..to_segment {
        if segno % SEGMENTS_PROGRESS_REPORT_INTERVAL == 0 {
            info!("copied all segments from {} until {}", from_segment, segno);
        }

        let segment_name = XLogFileName(PG_TLI, segno, wal_seg_size);
        if uploaded_segments.contains(&segment_name) {
            continue;
        }
        debug!("copying segment {}", segment_name);

        let from = remote_timeline_path(src_ttid)?.join(&segment_name);
        let to = remote_dst_path.join(&segment_name);

        storage.copy_object(&from, &to, &cancel).await?;
    }

    info!(
        "finished copying segments from {} until {}",
        from_segment, to_segment
    );
    Ok(())
}

/// Get S3 (remote_storage) prefix path used for timeline files.
pub fn remote_timeline_path(ttid: &TenantTimelineId) -> Result<RemotePath> {
    RemotePath::new(&Utf8Path::new(&ttid.tenant_id.to_string()).join(ttid.timeline_id.to_string()))
}
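
// Illustrative: the remote prefix is simply "<tenant_id>/<timeline_id>", both
// rendered as hex strings, so all WAL objects of a timeline share one listable
// prefix. (Hypothetical IDs would yield e.g. "1f2d.../9a8b...".)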
        

Generated by: LCOV version 2.1-beta