use anyhow::{Context, Result};

use camino::{Utf8Path, Utf8PathBuf};
use futures::stream::FuturesOrdered;
use futures::StreamExt;
use tokio::task::JoinHandle;
use tokio_util::sync::CancellationToken;
use utils::backoff;
use utils::id::NodeId;

use std::cmp::min;
use std::collections::HashSet;
use std::num::NonZeroU32;
use std::pin::Pin;
use std::sync::Arc;
use std::time::Duration;

use postgres_ffi::v14::xlog_utils::XLogSegNoOffsetToRecPtr;
use postgres_ffi::XLogFileName;
use postgres_ffi::{XLogSegNo, PG_TLI};
use remote_storage::{GenericRemoteStorage, ListingMode, RemotePath, StorageMetadata};
use tokio::fs::File;

use tokio::select;
use tokio::sync::mpsc::{self, Receiver, Sender};
use tokio::sync::watch;
use tokio::time::sleep;
use tracing::*;

use utils::{id::TenantTimelineId, lsn::Lsn};

use crate::metrics::{BACKED_UP_SEGMENTS, BACKUP_ERRORS, WAL_BACKUP_TASKS};
use crate::timeline::{FullAccessTimeline, PeerInfo, Timeline};
use crate::timeline_manager::StateSnapshot;
use crate::{SafeKeeperConf, WAL_BACKUP_RUNTIME};

use once_cell::sync::OnceCell;

const UPLOAD_FAILURE_RETRY_MIN_MS: u64 = 10;
const UPLOAD_FAILURE_RETRY_MAX_MS: u64 = 5000;

/// Default buffer size when interfacing with [`tokio::fs::File`].
const BUFFER_SIZE: usize = 32 * 1024;

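/// Handle of a per-timeline WAL backup task: a channel to ask it to stop and the
/// join handle used to await its exit.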
pub struct WalBackupTaskHandle {
    shutdown_tx: Sender<()>,
    handle: JoinHandle<()>,
}

/// Do we have anything to upload to S3, i.e. should safekeepers run backup activity?
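/// Backup is required while a compute is connected, or while the commit LSN has
/// advanced into a segment that has not been fully backed up yet.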
pub fn is_wal_backup_required(
    wal_seg_size: usize,
    num_computes: usize,
    state: &StateSnapshot,
) -> bool {
    num_computes > 0 ||
    // Currently only the whole segment is offloaded, so compare segment numbers.
    (state.commit_lsn.segment_number(wal_seg_size) > state.backup_lsn.segment_number(wal_seg_size))
}

/// Based on peer information, determine which safekeeper should offload. If it
/// is this node, start the per-timeline backup task unless it is already
/// running; if it is another node and our task is running, stop it.
pub async fn update_task(
    conf: &SafeKeeperConf,
    tli: &Arc<Timeline>,
    need_backup: bool,
    state: &StateSnapshot,
    entry: &mut Option<WalBackupTaskHandle>,
) {
    let (offloader, election_dbg_str) =
        determine_offloader(&state.peers, state.backup_lsn, tli.ttid, conf);
    let elected_me = Some(conf.my_id) == offloader;

    let should_task_run = need_backup && elected_me;

    // start or stop the task
    if should_task_run != (entry.is_some()) {
        if should_task_run {
            info!("elected for backup: {}", election_dbg_str);

            let (shutdown_tx, shutdown_rx) = mpsc::channel(1);

            let async_task = backup_task_main(tli.clone(), conf.backup_parallel_jobs, shutdown_rx);

            let handle = if conf.current_thread_runtime {
                tokio::spawn(async_task)
            } else {
                WAL_BACKUP_RUNTIME.spawn(async_task)
            };

            *entry = Some(WalBackupTaskHandle {
                shutdown_tx,
                handle,
            });
        } else {
            if !need_backup {
                // don't need backup at all
                info!("stepping down from backup, need_backup={}", need_backup);
            } else {
                // someone else has been elected
                info!("stepping down from backup: {}", election_dbg_str);
            }
            shut_down_task(entry).await;
        }
    }
}

async fn shut_down_task(entry: &mut Option<WalBackupTaskHandle>) {
    if let Some(wb_handle) = entry.take() {
        // Tell the task to shut down. An error means the task already exited, which is fine.
        let _ = wb_handle.shutdown_tx.send(()).await;
        // Await the task itself. TODO: restart panicked tasks earlier.
        if let Err(e) = wb_handle.handle.await {
            warn!("WAL backup task panicked: {}", e);
        }
    }
}

/// The goal is to ensure that normally only one safekeeper offloads. However,
/// it is fine (and inevitable, as S3 doesn't provide CAS) to have several of
/// them doing so for a short time, since they just PUT the same files. Also,
/// - frequently changing the offloader would be bad;
/// - electing a seriously lagging safekeeper is undesirable;
/// so we deterministically choose among the reasonably caught-up candidates.
/// TODO: take failed attempts into account to deal with the hypothetical
/// situation where S3 is unreachable only for some safekeepers.
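///
/// For example (hypothetical numbers): with caught-up peers {1, 4, 9}, sorted by
/// id, and `timeline_id % 3 == 2`, peer 9 is elected; another timeline hashes to
/// a different index, so offloading work is spread across safekeepers.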
fn determine_offloader(
    alive_peers: &[PeerInfo],
    wal_backup_lsn: Lsn,
    ttid: TenantTimelineId,
    conf: &SafeKeeperConf,
) -> (Option<NodeId>, String) {
    // TODO: remove this once we fill newly joined safekeepers since backup_lsn.
    let capable_peers = alive_peers
        .iter()
        .filter(|p| p.local_start_lsn <= wal_backup_lsn);
    match capable_peers.clone().map(|p| p.commit_lsn).max() {
        None => (None, "no connected peers to elect from".to_string()),
        Some(max_commit_lsn) => {
            let threshold = max_commit_lsn
                .checked_sub(conf.max_offloader_lag_bytes)
                .unwrap_or(Lsn(0));
            let mut caughtup_peers = capable_peers
                .clone()
                .filter(|p| p.commit_lsn >= threshold)
                .collect::<Vec<_>>();
            caughtup_peers.sort_by(|p1, p2| p1.sk_id.cmp(&p2.sk_id));

            // To distribute the load, shift by timeline_id.
            let offloader = caughtup_peers
                [(u128::from(ttid.timeline_id) % caughtup_peers.len() as u128) as usize]
                .sk_id;

            let mut capable_peers_dbg = capable_peers
                .map(|p| (p.sk_id, p.commit_lsn))
                .collect::<Vec<_>>();
            capable_peers_dbg.sort_by(|p1, p2| p1.0.cmp(&p2.0));
            (
                Some(offloader),
                format!(
                    "elected {} among {:?} peers, with {} of them being caught up",
                    offloader,
                    capable_peers_dbg,
                    caughtup_peers.len()
                ),
            )
        }
    }
}

static REMOTE_STORAGE: OnceCell<Option<GenericRemoteStorage>> = OnceCell::new();

// Storage must be configured and initialized when this is called.
fn get_configured_remote_storage() -> &'static GenericRemoteStorage {
    REMOTE_STORAGE
        .get()
        .expect("failed to get remote storage")
        .as_ref()
        .unwrap()
}

pub fn init_remote_storage(conf: &SafeKeeperConf) {
    // TODO: refactor REMOTE_STORAGE to avoid using global variables, and provide
    // dependencies to all tasks instead.
    REMOTE_STORAGE.get_or_init(|| {
        conf.remote_storage
            .as_ref()
            .map(|c| GenericRemoteStorage::from_config(c).expect("failed to create remote storage"))
    });
}

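/// State of the per-timeline WAL offload loop driven by `WalBackupTask::run`.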
struct WalBackupTask {
    timeline: FullAccessTimeline,
    timeline_dir: Utf8PathBuf,
    wal_seg_size: usize,
    parallel_jobs: usize,
    commit_lsn_watch_rx: watch::Receiver<Lsn>,
}

/// Offload a single timeline.
#[instrument(name = "WAL backup", skip_all, fields(ttid = %tli.ttid))]
async fn backup_task_main(tli: Arc<Timeline>, parallel_jobs: usize, mut shutdown_rx: Receiver<()>) {
    let _guard = WAL_BACKUP_TASKS.guard();

    let tli = match tli.full_access_guard().await {
        Ok(tli) => tli,
        Err(e) => {
            error!("backup error: {}", e);
            return;
        }
    };
    info!("started");

    let mut wb = WalBackupTask {
        wal_seg_size: tli.get_wal_seg_size().await,
        commit_lsn_watch_rx: tli.get_commit_lsn_watch_rx(),
        timeline_dir: tli.get_timeline_dir(),
        timeline: tli,
        parallel_jobs,
    };

    // The task is spun up only after wal_seg_size is initialized.
    assert!(wb.wal_seg_size > 0);

    let mut canceled = false;
    select! {
        _ = wb.run() => {}
        _ = shutdown_rx.recv() => {
            canceled = true;
        }
    }
    info!("task {}", if canceled { "canceled" } else { "terminated" });
}

impl WalBackupTask {
    async fn run(&mut self) {
        let mut backup_lsn = Lsn(0);

        let mut retry_attempt = 0u32;
        // offload loop
        loop {
            if retry_attempt == 0 {
                // wait for new WAL to arrive
                if let Err(e) = self.commit_lsn_watch_rx.changed().await {
                    // should never happen, as we hold Arc to timeline.
                    error!("commit_lsn watch shut down: {:?}", e);
                    return;
                }
            } else {
                // or just sleep if we errored previously
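                // Exponential backoff: UPLOAD_FAILURE_RETRY_MIN_MS shifted left by the
                // attempt number, capped at UPLOAD_FAILURE_RETRY_MAX_MS (5 seconds).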
                let mut retry_delay = UPLOAD_FAILURE_RETRY_MAX_MS;
                if let Some(backoff_delay) = UPLOAD_FAILURE_RETRY_MIN_MS.checked_shl(retry_attempt)
                {
                    retry_delay = min(retry_delay, backoff_delay);
                }
                sleep(Duration::from_millis(retry_delay)).await;
            }

            let commit_lsn = *self.commit_lsn_watch_rx.borrow();

            // Note that backup_lsn can be higher than commit_lsn if we
            // don't have much local WAL and others already uploaded
            // segments we don't even have.
            if backup_lsn.segment_number(self.wal_seg_size)
                >= commit_lsn.segment_number(self.wal_seg_size)
            {
                retry_attempt = 0;
                continue; /* nothing to do, common case as we wake up on every commit_lsn bump */
            }
            // Perhaps peers advanced the position, check shmem value.
            backup_lsn = self.timeline.get_wal_backup_lsn().await;
            if backup_lsn.segment_number(self.wal_seg_size)
                >= commit_lsn.segment_number(self.wal_seg_size)
            {
                retry_attempt = 0;
                continue;
            }

            match backup_lsn_range(
                &self.timeline,
                &mut backup_lsn,
                commit_lsn,
                self.wal_seg_size,
                &self.timeline_dir,
                self.parallel_jobs,
            )
            .await
            {
                Ok(()) => {
                    retry_attempt = 0;
                }
                Err(e) => {
                    error!(
                        "failed while offloading range {}-{}: {:?}",
                        backup_lsn, commit_lsn, e
                    );

                    retry_attempt = retry_attempt.saturating_add(1);
                }
            }
        }
    }
}

async fn backup_lsn_range(
    timeline: &FullAccessTimeline,
    backup_lsn: &mut Lsn,
    end_lsn: Lsn,
    wal_seg_size: usize,
    timeline_dir: &Utf8Path,
    parallel_jobs: usize,
) -> Result<()> {
    if parallel_jobs < 1 {
        anyhow::bail!("parallel_jobs must be >= 1");
    }

    let remote_timeline_path = remote_timeline_path(&timeline.ttid)?;
    let start_lsn = *backup_lsn;
    let segments = get_segments(start_lsn, end_lsn, wal_seg_size);

    // Pool of concurrent upload tasks. We use `FuturesOrdered` to
    // preserve order of uploads, and update `backup_lsn` only after
    // all previous uploads are finished.
    let mut uploads = FuturesOrdered::new();
    let mut iter = segments.iter();

    loop {
        let added_task = match iter.next() {
            Some(s) => {
                uploads.push_back(backup_single_segment(
                    s,
                    timeline_dir,
                    &remote_timeline_path,
                ));
                true
            }
            None => false,
        };

        // Wait for the next segment to upload if we don't have any more segments,
        // or if we have too many concurrent uploads.
        if !added_task || uploads.len() >= parallel_jobs {
            let next = uploads.next().await;
            if let Some(res) = next {
                // next segment uploaded
                let segment = res?;
                let new_backup_lsn = segment.end_lsn;
                timeline
                    .set_wal_backup_lsn(new_backup_lsn)
                    .await
                    .context("setting wal_backup_lsn")?;
                *backup_lsn = new_backup_lsn;
            } else {
                // no more segments to upload
                break;
            }
        }
    }

    info!(
        "offloaded segnos {:?} up to {}, previous backup_lsn {}",
        segments.iter().map(|&s| s.seg_no).collect::<Vec<_>>(),
        end_lsn,
        start_lsn,
    );
    Ok(())
}

async fn backup_single_segment(
    seg: &Segment,
    timeline_dir: &Utf8Path,
    remote_timeline_path: &RemotePath,
) -> Result<Segment> {
    let segment_file_path = seg.file_path(timeline_dir)?;
    let remote_segment_path = seg.remote_path(remote_timeline_path);

    let res = backup_object(&segment_file_path, &remote_segment_path, seg.size()).await;
    if res.is_ok() {
        BACKED_UP_SEGMENTS.inc();
    } else {
        BACKUP_ERRORS.inc();
    }
    res?;
    debug!("Backup of {} done", segment_file_path);

    Ok(*seg)
}

#[derive(Debug, Copy, Clone)]
pub struct Segment {
    seg_no: XLogSegNo,
    start_lsn: Lsn,
    end_lsn: Lsn,
}

impl Segment {
    pub fn new(seg_no: u64, start_lsn: Lsn, end_lsn: Lsn) -> Self {
        Self {
            seg_no,
            start_lsn,
            end_lsn,
        }
    }

    pub fn object_name(self) -> String {
        XLogFileName(PG_TLI, self.seg_no, self.size())
    }

    pub fn file_path(self, timeline_dir: &Utf8Path) -> Result<Utf8PathBuf> {
        Ok(timeline_dir.join(self.object_name()))
    }

    pub fn remote_path(self, remote_timeline_path: &RemotePath) -> RemotePath {
        remote_timeline_path.join(self.object_name())
    }

    pub fn size(self) -> usize {
        (u64::from(self.end_lsn) - u64::from(self.start_lsn)) as usize
    }
}

fn get_segments(start: Lsn, end: Lsn, seg_size: usize) -> Vec<Segment> {
    let first_seg = start.segment_number(seg_size);
    let last_seg = end.segment_number(seg_size);

    let res: Vec<Segment> = (first_seg..last_seg)
        .map(|s| {
            let start_lsn = XLogSegNoOffsetToRecPtr(s, 0, seg_size);
            let end_lsn = XLogSegNoOffsetToRecPtr(s + 1, 0, seg_size);
            Segment::new(s, Lsn::from(start_lsn), Lsn::from(end_lsn))
        })
        .collect();
    res
}
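
// A minimal sanity-check sketch (not part of the original module): it assumes the
// default 16 MiB WAL segment size and only exercises `get_segments`, which should
// return exactly the whole segments lying between `start` and `end`.
#[cfg(test)]
mod get_segments_tests {
    use super::*;

    #[test]
    fn covers_whole_segments_between_lsns() {
        let seg_size: usize = 16 * 1024 * 1024;
        let segments = get_segments(Lsn(0), Lsn(2 * seg_size as u64), seg_size);

        // Two whole segments are expected: [0, seg_size) and [seg_size, 2 * seg_size).
        assert_eq!(segments.len(), 2);
        assert_eq!(segments[0].start_lsn, Lsn(0));
        assert_eq!(segments[0].end_lsn, Lsn(seg_size as u64));
        assert_eq!(segments[1].start_lsn, Lsn(seg_size as u64));
        assert_eq!(segments[1].end_lsn, Lsn(2 * seg_size as u64));
    }
}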

async fn backup_object(
    source_file: &Utf8Path,
    target_file: &RemotePath,
    size: usize,
) -> Result<()> {
    let storage = get_configured_remote_storage();

    let file = File::open(&source_file)
        .await
        .with_context(|| format!("Failed to open file {source_file:?} for wal backup"))?;

    let file = tokio_util::io::ReaderStream::with_capacity(file, BUFFER_SIZE);

    let cancel = CancellationToken::new();

    storage
        .upload_storage_object(file, size, target_file, &cancel)
        .await
}

pub(crate) async fn backup_partial_segment(
    source_file: &Utf8Path,
    target_file: &RemotePath,
    size: usize,
) -> Result<()> {
    let storage = get_configured_remote_storage();

    let file = File::open(&source_file)
        .await
        .with_context(|| format!("Failed to open file {source_file:?} for wal backup"))?;

    // Limit the read to the first `size` bytes of the file.
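    // (The on-disk segment file can contain bytes past the valid prefix, so we
    // must not upload more than `size`.)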
    let limited_file = tokio::io::AsyncReadExt::take(file, size as u64);

    let file = tokio_util::io::ReaderStream::with_capacity(limited_file, BUFFER_SIZE);

    let cancel = CancellationToken::new();

    storage
        .upload(
            file,
            size,
            target_file,
            Some(StorageMetadata::from([("sk_type", "partial_segment")])),
            &cancel,
        )
        .await
}

pub async fn read_object(
    file_path: &RemotePath,
    offset: u64,
) -> anyhow::Result<Pin<Box<dyn tokio::io::AsyncRead + Send + Sync>>> {
    let storage = REMOTE_STORAGE
        .get()
        .context("Failed to get remote storage")?
        .as_ref()
        .context("No remote storage configured")?;

    info!("segment download about to start from remote path {file_path:?} at offset {offset}");

    let cancel = CancellationToken::new();

    let download = storage
        .download_storage_object(Some((offset, None)), file_path, &cancel)
        .await
        .with_context(|| {
            format!("Failed to open WAL segment download stream for remote path {file_path:?}")
        })?;

    let reader = tokio_util::io::StreamReader::new(download.download_stream);

    let reader = tokio::io::BufReader::with_capacity(BUFFER_SIZE, reader);

    Ok(Box::pin(reader))
}

/// Delete WAL files for the given timeline. Remote storage must be configured
/// when called.
pub async fn delete_timeline(ttid: &TenantTimelineId) -> Result<()> {
    let storage = get_configured_remote_storage();
    let remote_path = remote_timeline_path(ttid)?;

    // see DEFAULT_MAX_KEYS_PER_LIST_RESPONSE
    // const Option unwrap is not stable, otherwise it would be const.
    let batch_size: NonZeroU32 = NonZeroU32::new(1000).unwrap();

    // A backoff::retry is used here for two reasons:
    // - To provide a backoff rather than busy-polling the API on errors
    // - To absorb transient 429/503 conditions without hitting our error
    //   logging path for issues deleting objects.
    //
    // Note: listing segments might take a long time if there are many of them.
    // We don't currently have timeouts or cancellation for these HTTP requests;
    // if/once we do, listing should get a streaming interface so that deletion
    // can make progress incrementally.

    let cancel = CancellationToken::new(); // not really used
    backoff::retry(
        || async {
            // Do list-delete in batch_size batches to make progress even if there
            // are a lot of files. Alternatively we could make the remote storage
            // list return an iterator, but that is more complicated, and it is
            // unclear whether deleting while iterating is well-defined for S3.
            loop {
                let files = storage
                    .list(
                        Some(&remote_path),
                        ListingMode::NoDelimiter,
                        Some(batch_size),
                        &cancel,
                    )
                    .await?
                    .keys;
                if files.is_empty() {
                    return Ok(()); // done
                }
                // (at least) S3 results are sorted, so we can log min/max:
                // "List results are always returned in UTF-8 binary order."
                info!(
                    "deleting batch of {} WAL segments [{}-{}]",
                    files.len(),
                    files.first().unwrap().object_name().unwrap_or(""),
                    files.last().unwrap().object_name().unwrap_or("")
                );
                storage.delete_objects(&files, &cancel).await?;
            }
        },
        // consider TimeoutOrCancel::caused_by_cancel when using cancellation
        |_| false,
        3,
        10,
        "executing WAL segments deletion batch",
        &cancel,
    )
    .await
    .ok_or_else(|| anyhow::anyhow!("canceled"))
    .and_then(|x| x)?;

    Ok(())
}

/// Used by wal_backup_partial.
pub async fn delete_objects(paths: &[RemotePath]) -> Result<()> {
    let cancel = CancellationToken::new(); // not really used
    let storage = get_configured_remote_storage();
    storage.delete_objects(paths, &cancel).await
}

/// Copy segments from one timeline to another. Used in copy_timeline.
pub async fn copy_s3_segments(
    wal_seg_size: usize,
    src_ttid: &TenantTimelineId,
    dst_ttid: &TenantTimelineId,
    from_segment: XLogSegNo,
    to_segment: XLogSegNo,
) -> Result<()> {
    const SEGMENTS_PROGRESS_REPORT_INTERVAL: u64 = 1024;

    let storage = REMOTE_STORAGE
        .get()
        .expect("failed to get remote storage")
        .as_ref()
        .unwrap();

    let remote_dst_path = remote_timeline_path(dst_ttid)?;

    let cancel = CancellationToken::new();

    let files = storage
        .list(
            Some(&remote_dst_path),
            ListingMode::NoDelimiter,
            None,
            &cancel,
        )
        .await?
        .keys;

    let uploaded_segments = &files
        .iter()
        .filter_map(|file| file.object_name().map(ToOwned::to_owned))
        .collect::<HashSet<_>>();

    debug!(
        "these segments have already been uploaded: {:?}",
        uploaded_segments
    );

    for segno in from_segment..to_segment {
        if segno % SEGMENTS_PROGRESS_REPORT_INTERVAL == 0 {
            info!("copied all segments from {} until {}", from_segment, segno);
        }

        let segment_name = XLogFileName(PG_TLI, segno, wal_seg_size);
        if uploaded_segments.contains(&segment_name) {
            continue;
        }
        debug!("copying segment {}", segment_name);

        let from = remote_timeline_path(src_ttid)?.join(&segment_name);
        let to = remote_dst_path.join(&segment_name);

        storage.copy_object(&from, &to, &cancel).await?;
    }

    info!(
        "finished copying segments from {} until {}",
        from_segment, to_segment
    );
    Ok(())
}

/// Get S3 (remote_storage) prefix path used for timeline files.
pub fn remote_timeline_path(ttid: &TenantTimelineId) -> Result<RemotePath> {
    RemotePath::new(&Utf8Path::new(&ttid.tenant_id.to_string()).join(ttid.timeline_id.to_string()))
}