use anyhow::{Context, Result};

use camino::{Utf8Path, Utf8PathBuf};
use futures::stream::FuturesOrdered;
use futures::StreamExt;
use safekeeper_api::models::PeerInfo;
use tokio::task::JoinHandle;
use tokio_util::sync::CancellationToken;
use utils::backoff;
use utils::id::NodeId;

use std::cmp::min;
use std::collections::HashSet;
use std::num::NonZeroU32;
use std::pin::Pin;
use std::time::Duration;

use postgres_ffi::v14::xlog_utils::XLogSegNoOffsetToRecPtr;
use postgres_ffi::XLogFileName;
use postgres_ffi::{XLogSegNo, PG_TLI};
use remote_storage::{
    DownloadOpts, GenericRemoteStorage, ListingMode, RemotePath, StorageMetadata,
};
use tokio::fs::File;

use tokio::select;
use tokio::sync::mpsc::{self, Receiver, Sender};
use tokio::sync::{watch, OnceCell};
use tracing::*;

use utils::{id::TenantTimelineId, lsn::Lsn};

use crate::metrics::{BACKED_UP_SEGMENTS, BACKUP_ERRORS, WAL_BACKUP_TASKS};
use crate::timeline::WalResidentTimeline;
use crate::timeline_manager::{Manager, StateSnapshot};
use crate::{SafeKeeperConf, WAL_BACKUP_RUNTIME};

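// Bounds for the backoff delay after a failed segment upload: the delay grows
// as `UPLOAD_FAILURE_RETRY_MIN_MS << retry_attempt`, capped at the maximum.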
const UPLOAD_FAILURE_RETRY_MIN_MS: u64 = 10;
const UPLOAD_FAILURE_RETRY_MAX_MS: u64 = 5000;

/// Default buffer size when interfacing with [`tokio::fs::File`].
const BUFFER_SIZE: usize = 32 * 1024;

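/// Handle to a running per-timeline WAL backup task: a channel to request
/// shutdown and the join handle of the spawned task.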
pub struct WalBackupTaskHandle {
    shutdown_tx: Sender<()>,
    handle: JoinHandle<()>,
}

impl WalBackupTaskHandle {
    pub(crate) async fn join(self) {
        if let Err(e) = self.handle.await {
            error!("WAL backup task panicked: {}", e);
        }
    }
}

/// Do we have anything to upload to S3, i.e. should safekeepers run backup activity?
pub(crate) fn is_wal_backup_required(
    wal_seg_size: usize,
    num_computes: usize,
    state: &StateSnapshot,
) -> bool {
    num_computes > 0 ||
    // Currently only the whole segment is offloaded, so compare segment numbers.
    (state.commit_lsn.segment_number(wal_seg_size) > state.backup_lsn.segment_number(wal_seg_size))
}

/// Based on peer information, determine which safekeeper should offload. If it
/// is us, start the per-timeline task unless it is already running; conversely,
/// if another node has been elected while our task is running, kill it.
pub(crate) async fn update_task(mgr: &mut Manager, need_backup: bool, state: &StateSnapshot) {
    let (offloader, election_dbg_str) =
        determine_offloader(&state.peers, state.backup_lsn, mgr.tli.ttid, &mgr.conf);
    let elected_me = Some(mgr.conf.my_id) == offloader;

    let should_task_run = need_backup && elected_me;

    // start or stop the task
    if should_task_run != (mgr.backup_task.is_some()) {
        if should_task_run {
            info!("elected for backup: {}", election_dbg_str);

            let (shutdown_tx, shutdown_rx) = mpsc::channel(1);

            let Ok(resident) = mgr.wal_resident_timeline() else {
                info!("Timeline shut down");
                return;
            };

            let async_task = backup_task_main(resident, mgr.conf.backup_parallel_jobs, shutdown_rx);

            let handle = if mgr.conf.current_thread_runtime {
                tokio::spawn(async_task)
            } else {
                WAL_BACKUP_RUNTIME.spawn(async_task)
            };

            mgr.backup_task = Some(WalBackupTaskHandle {
                shutdown_tx,
                handle,
            });
        } else {
            if !need_backup {
                // don't need backup at all
                info!("stepping down from backup, need_backup={}", need_backup);
            } else {
                // someone else has been elected
                info!("stepping down from backup: {}", election_dbg_str);
            }
            shut_down_task(&mut mgr.backup_task).await;
        }
    }
}

async fn shut_down_task(entry: &mut Option<WalBackupTaskHandle>) {
    if let Some(wb_handle) = entry.take() {
        // Tell the task to shut down. An error means the task exited earlier; that's ok.
        let _ = wb_handle.shutdown_tx.send(()).await;
        // Await the task itself. TODO: restart panicked tasks earlier.
        wb_handle.join().await;
    }
}

/// The goal is to ensure that normally only one safekeeper offloads. However,
/// it is fine (and inevitable, since S3 doesn't provide CAS) to have several of
/// them for short periods as they PUT the same files. Also,
/// - frequently changing the offloader would be bad;
/// - electing a seriously lagging safekeeper is undesirable.
///
/// So we deterministically choose among the reasonably caught-up candidates.
/// TODO: take failed attempts into account to handle the hypothetical situation
/// where S3 is unreachable only for some safekeepers.
fn determine_offloader(
    alive_peers: &[PeerInfo],
    wal_backup_lsn: Lsn,
    ttid: TenantTimelineId,
    conf: &SafeKeeperConf,
) -> (Option<NodeId>, String) {
    // TODO: remove this once we fill newly joined safekeepers since backup_lsn.
    let capable_peers = alive_peers
        .iter()
        .filter(|p| p.local_start_lsn <= wal_backup_lsn);
    match capable_peers.clone().map(|p| p.commit_lsn).max() {
        None => (None, "no connected peers to elect from".to_string()),
        Some(max_commit_lsn) => {
            let threshold = max_commit_lsn
                .checked_sub(conf.max_offloader_lag_bytes)
                .unwrap_or(Lsn(0));
            let mut caughtup_peers = capable_peers
                .clone()
                .filter(|p| p.commit_lsn >= threshold)
                .collect::<Vec<_>>();
            caughtup_peers.sort_by(|p1, p2| p1.sk_id.cmp(&p2.sk_id));

            // To distribute the load, shift by timeline_id.
            let offloader = caughtup_peers
                [(u128::from(ttid.timeline_id) % caughtup_peers.len() as u128) as usize]
                .sk_id;

            let mut capable_peers_dbg = capable_peers
                .map(|p| (p.sk_id, p.commit_lsn))
                .collect::<Vec<_>>();
            capable_peers_dbg.sort_by(|p1, p2| p1.0.cmp(&p2.0));
            (
                Some(offloader),
                format!(
                    "elected {} among {:?} peers, with {} of them being caught up",
                    offloader,
                    capable_peers_dbg,
                    caughtup_peers.len()
                ),
            )
        }
    }
}

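/// Global remote storage client, initialized once at startup; the inner
/// `Option` is `None` when remote storage is not configured.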
static REMOTE_STORAGE: OnceCell<Option<GenericRemoteStorage>> = OnceCell::const_new();

// Storage must be configured and initialized when this is called.
fn get_configured_remote_storage() -> &'static GenericRemoteStorage {
    REMOTE_STORAGE
        .get()
        .expect("failed to get remote storage")
        .as_ref()
        .unwrap()
}

pub async fn init_remote_storage(conf: &SafeKeeperConf) {
    // TODO: refactor REMOTE_STORAGE to avoid using global variables, and provide
    // dependencies to all tasks instead.
    REMOTE_STORAGE
        .get_or_init(|| async {
            if let Some(conf) = conf.remote_storage.as_ref() {
                Some(
                    GenericRemoteStorage::from_config(conf)
                        .await
                        .expect("failed to create remote storage"),
                )
            } else {
                None
            }
        })
        .await;
}

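/// State of the per-timeline WAL backup (offload) loop.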
struct WalBackupTask {
    timeline: WalResidentTimeline,
    timeline_dir: Utf8PathBuf,
    wal_seg_size: usize,
    parallel_jobs: usize,
    commit_lsn_watch_rx: watch::Receiver<Lsn>,
}

/// Offload a single timeline.
#[instrument(name = "wal_backup", skip_all, fields(ttid = %tli.ttid))]
async fn backup_task_main(
    tli: WalResidentTimeline,
    parallel_jobs: usize,
    mut shutdown_rx: Receiver<()>,
) {
    let _guard = WAL_BACKUP_TASKS.guard();
    info!("started");

    let cancel = tli.tli.cancel.clone();
    let mut wb = WalBackupTask {
        wal_seg_size: tli.get_wal_seg_size().await,
        commit_lsn_watch_rx: tli.get_commit_lsn_watch_rx(),
        timeline_dir: tli.get_timeline_dir(),
        timeline: tli,
        parallel_jobs,
    };

    // The task is spun up only after wal_seg_size is initialized.
    assert!(wb.wal_seg_size > 0);

    let mut canceled = false;
    select! {
        _ = wb.run() => {}
        _ = shutdown_rx.recv() => {
            canceled = true;
        },
        _ = cancel.cancelled() => {
            canceled = true;
        }
    }
    info!("task {}", if canceled { "canceled" } else { "terminated" });
}

impl WalBackupTask {
    /// This function must be called from a select! that also respects self.timeline's
    /// cancellation token. This is done in [`backup_task_main`].
    ///
    /// The future returned by this function is safe to drop at any time because it
    /// does not write to local disk.
    async fn run(&mut self) {
        let mut backup_lsn = Lsn(0);

        let mut retry_attempt = 0u32;
        // offload loop
        while !self.timeline.cancel.is_cancelled() {
            if retry_attempt == 0 {
                // wait for new WAL to arrive
                if let Err(e) = self.commit_lsn_watch_rx.changed().await {
                    // Should never happen, since we hold an Arc to the timeline and the
                    // transmitter's lifetime is within the Timeline's.
                    error!("commit_lsn watch shut down: {:?}", e);
                    return;
                };
            } else {
                // or just sleep if we errored previously
                let mut retry_delay = UPLOAD_FAILURE_RETRY_MAX_MS;
                if let Some(backoff_delay) = UPLOAD_FAILURE_RETRY_MIN_MS.checked_shl(retry_attempt)
                {
                    retry_delay = min(retry_delay, backoff_delay);
                }
                tokio::time::sleep(Duration::from_millis(retry_delay)).await;
            }

            let commit_lsn = *self.commit_lsn_watch_rx.borrow();

            // Note that backup_lsn can be higher than commit_lsn if we
            // don't have much local WAL and others have already uploaded
            // segments we don't even have.
            if backup_lsn.segment_number(self.wal_seg_size)
                >= commit_lsn.segment_number(self.wal_seg_size)
            {
                retry_attempt = 0;
                continue; /* nothing to do, common case as we wake up on every commit_lsn bump */
            }
            // Perhaps peers advanced the position; check the shmem value.
            backup_lsn = self.timeline.get_wal_backup_lsn().await;
            if backup_lsn.segment_number(self.wal_seg_size)
                >= commit_lsn.segment_number(self.wal_seg_size)
            {
                retry_attempt = 0;
                continue;
            }

            match backup_lsn_range(
                &self.timeline,
                &mut backup_lsn,
                commit_lsn,
                self.wal_seg_size,
                &self.timeline_dir,
                self.parallel_jobs,
            )
            .await
            {
                Ok(()) => {
                    retry_attempt = 0;
                }
                Err(e) => {
                    error!(
                        "failed while offloading range {}-{}: {:?}",
                        backup_lsn, commit_lsn, e
                    );

                    retry_attempt = retry_attempt.saturating_add(1);
                }
            }
        }
    }
}

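/// Upload all complete segments between `*backup_lsn` and `end_lsn`, running at
/// most `parallel_jobs` uploads concurrently and advancing `*backup_lsn` as
/// uploads complete in order.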
async fn backup_lsn_range(
    timeline: &WalResidentTimeline,
    backup_lsn: &mut Lsn,
    end_lsn: Lsn,
    wal_seg_size: usize,
    timeline_dir: &Utf8Path,
    parallel_jobs: usize,
) -> Result<()> {
    if parallel_jobs < 1 {
        anyhow::bail!("parallel_jobs must be >= 1");
    }

    let remote_timeline_path = &timeline.remote_path;
    let start_lsn = *backup_lsn;
    let segments = get_segments(start_lsn, end_lsn, wal_seg_size);

    // Pool of concurrent upload tasks. We use `FuturesOrdered` to
    // preserve the order of uploads, and update `backup_lsn` only after
    // all previous uploads are finished.
    let mut uploads = FuturesOrdered::new();
    let mut iter = segments.iter();

    loop {
        let added_task = match iter.next() {
            Some(s) => {
                uploads.push_back(backup_single_segment(s, timeline_dir, remote_timeline_path));
                true
            }
            None => false,
        };

        // Wait for the next segment to upload if we don't have any more segments,
        // or if we have too many concurrent uploads.
        if !added_task || uploads.len() >= parallel_jobs {
            let next = uploads.next().await;
            if let Some(res) = next {
                // next segment uploaded
                let segment = res?;
                let new_backup_lsn = segment.end_lsn;
                timeline
                    .set_wal_backup_lsn(new_backup_lsn)
                    .await
                    .context("setting wal_backup_lsn")?;
                *backup_lsn = new_backup_lsn;
            } else {
                // no more segments to upload
                break;
            }
        }
    }

    info!(
        "offloaded segnos {:?} up to {}, previous backup_lsn {}",
        segments.iter().map(|&s| s.seg_no).collect::<Vec<_>>(),
        end_lsn,
        start_lsn,
    );
    Ok(())
}

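/// Upload one segment file to remote storage, updating success/failure metrics.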
async fn backup_single_segment(
    seg: &Segment,
    timeline_dir: &Utf8Path,
    remote_timeline_path: &RemotePath,
) -> Result<Segment> {
    let segment_file_path = seg.file_path(timeline_dir)?;
    let remote_segment_path = seg.remote_path(remote_timeline_path);

    let res = backup_object(&segment_file_path, &remote_segment_path, seg.size()).await;
    if res.is_ok() {
        BACKED_UP_SEGMENTS.inc();
    } else {
        BACKUP_ERRORS.inc();
    }
    res?;
    debug!("Backup of {} done", segment_file_path);

    Ok(*seg)
}

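/// A single WAL segment, identified by its segment number and the LSN range
/// `[start_lsn, end_lsn)` it covers.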
#[derive(Debug, Copy, Clone)]
pub struct Segment {
    seg_no: XLogSegNo,
    start_lsn: Lsn,
    end_lsn: Lsn,
}

impl Segment {
    pub fn new(seg_no: u64, start_lsn: Lsn, end_lsn: Lsn) -> Self {
        Self {
            seg_no,
            start_lsn,
            end_lsn,
        }
    }

    pub fn object_name(self) -> String {
        XLogFileName(PG_TLI, self.seg_no, self.size())
    }

    pub fn file_path(self, timeline_dir: &Utf8Path) -> Result<Utf8PathBuf> {
        Ok(timeline_dir.join(self.object_name()))
    }

    pub fn remote_path(self, remote_timeline_path: &RemotePath) -> RemotePath {
        remote_timeline_path.join(self.object_name())
    }

    pub fn size(self) -> usize {
        (u64::from(self.end_lsn) - u64::from(self.start_lsn)) as usize
    }
}

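/// Describe the segments with numbers in `[start.segment_number, end.segment_number)`,
/// i.e. the complete segments between the two LSNs.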
fn get_segments(start: Lsn, end: Lsn, seg_size: usize) -> Vec<Segment> {
    let first_seg = start.segment_number(seg_size);
    let last_seg = end.segment_number(seg_size);

    (first_seg..last_seg)
        .map(|s| {
            let start_lsn = XLogSegNoOffsetToRecPtr(s, 0, seg_size);
            let end_lsn = XLogSegNoOffsetToRecPtr(s + 1, 0, seg_size);
            Segment::new(s, Lsn::from(start_lsn), Lsn::from(end_lsn))
        })
        .collect()
}

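/// Upload a complete segment file of `size` bytes to remote storage.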
async fn backup_object(
    source_file: &Utf8Path,
    target_file: &RemotePath,
    size: usize,
) -> Result<()> {
    let storage = get_configured_remote_storage();

    let file = File::open(&source_file)
        .await
        .with_context(|| format!("Failed to open file {source_file:?} for wal backup"))?;

    let file = tokio_util::io::ReaderStream::with_capacity(file, BUFFER_SIZE);

    let cancel = CancellationToken::new();

    storage
        .upload_storage_object(file, size, target_file, &cancel)
        .await
}

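/// Upload the first `size` bytes of a partially filled segment file, marking
/// the object with `sk_type: partial_segment` metadata.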
pub(crate) async fn backup_partial_segment(
    source_file: &Utf8Path,
    target_file: &RemotePath,
    size: usize,
) -> Result<()> {
    let storage = get_configured_remote_storage();

    let file = File::open(&source_file)
        .await
        .with_context(|| format!("Failed to open file {source_file:?} for wal backup"))?;

    // Limit the reader to the first `size` bytes of the file.
    let limited_file = tokio::io::AsyncReadExt::take(file, size as u64);

    let file = tokio_util::io::ReaderStream::with_capacity(limited_file, BUFFER_SIZE);

    let cancel = CancellationToken::new();

    storage
        .upload(
            file,
            size,
            target_file,
            Some(StorageMetadata::from([("sk_type", "partial_segment")])),
            &cancel,
        )
        .await
}

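/// Copy a partial segment object between two remote paths.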
pub(crate) async fn copy_partial_segment(
    source: &RemotePath,
    destination: &RemotePath,
) -> Result<()> {
    let storage = get_configured_remote_storage();
    let cancel = CancellationToken::new();

    storage.copy_object(source, destination, &cancel).await
}

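/// Open a buffered read stream over a remote segment, starting at `offset`.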
pub async fn read_object(
    file_path: &RemotePath,
    offset: u64,
) -> anyhow::Result<Pin<Box<dyn tokio::io::AsyncRead + Send + Sync>>> {
    let storage = REMOTE_STORAGE
        .get()
        .context("Failed to get remote storage")?
        .as_ref()
        .context("No remote storage configured")?;

    info!("segment download about to start from remote path {file_path:?} at offset {offset}");

    let cancel = CancellationToken::new();

    let opts = DownloadOpts {
        byte_start: std::ops::Bound::Included(offset),
        ..Default::default()
    };
    let download = storage
        .download(file_path, &opts, &cancel)
        .await
        .with_context(|| {
            format!("Failed to open WAL segment download stream for remote path {file_path:?}")
        })?;

    let reader = tokio_util::io::StreamReader::new(download.download_stream);

    let reader = tokio::io::BufReader::with_capacity(BUFFER_SIZE, reader);

    Ok(Box::pin(reader))
}

/// Delete WAL files for the given timeline. Remote storage must be configured
/// when called.
pub async fn delete_timeline(ttid: &TenantTimelineId) -> Result<()> {
    let storage = get_configured_remote_storage();
    let remote_path = remote_timeline_path(ttid)?;

    // see DEFAULT_MAX_KEYS_PER_LIST_RESPONSE
    // const Option unwrap is not stable, otherwise this would be a const.
    let batch_size: NonZeroU32 = NonZeroU32::new(1000).unwrap();

    // A backoff::retry is used here for two reasons:
    // - To provide a backoff rather than busy-polling the API on errors.
    // - To absorb transient 429/503 conditions without hitting our error
    //   logging path for issues deleting objects.
    //
    // Note: listing segments might take a long time if there are many of them.
    // We don't currently have timeouts or cancellation for these HTTP requests,
    // but if/once we do, listing should get a streaming interface so that we
    // can make progress.

    let cancel = CancellationToken::new(); // not really used
    backoff::retry(
        || async {
            // Do list-delete in batch_size batches to make progress even if there are a lot of files.
            // Alternatively we could make remote storage list return an iterator, but that is more
            // complicated, and it's unclear whether deleting while iterating is supported by S3.
            loop {
                let files = storage
                    .list(
                        Some(&remote_path),
                        ListingMode::NoDelimiter,
                        Some(batch_size),
                        &cancel,
                    )
                    .await?
                    .keys
                    .into_iter()
                    .map(|o| o.key)
                    .collect::<Vec<_>>();
                if files.is_empty() {
                    return Ok(()); // done
                }
                // (At least) S3 results are sorted, so we can log min/max:
                // "List results are always returned in UTF-8 binary order."
                info!(
                    "deleting batch of {} WAL segments [{}-{}]",
                    files.len(),
                    files.first().unwrap().object_name().unwrap_or(""),
                    files.last().unwrap().object_name().unwrap_or("")
                );
                storage.delete_objects(&files, &cancel).await?;
            }
        },
        // consider TimeoutOrCancel::caused_by_cancel when using cancellation
        |_| false,
        3,
        10,
        "executing WAL segments deletion batch",
        &cancel,
    )
    .await
    .ok_or_else(|| anyhow::anyhow!("canceled"))
    .and_then(|x| x)?;

    Ok(())
}

/// Used by wal_backup_partial.
pub async fn delete_objects(paths: &[RemotePath]) -> Result<()> {
    let cancel = CancellationToken::new(); // not really used
    let storage = get_configured_remote_storage();
    storage.delete_objects(paths, &cancel).await
}

/// Copy segments from one timeline to another. Used in copy_timeline.
pub async fn copy_s3_segments(
    wal_seg_size: usize,
    src_ttid: &TenantTimelineId,
    dst_ttid: &TenantTimelineId,
    from_segment: XLogSegNo,
    to_segment: XLogSegNo,
) -> Result<()> {
    const SEGMENTS_PROGRESS_REPORT_INTERVAL: u64 = 1024;

    let storage = get_configured_remote_storage();

    let remote_dst_path = remote_timeline_path(dst_ttid)?;

    let cancel = CancellationToken::new();

    let files = storage
        .list(
            Some(&remote_dst_path),
            ListingMode::NoDelimiter,
            None,
            &cancel,
        )
        .await?
        .keys;

    let uploaded_segments = &files
        .iter()
        .filter_map(|o| o.key.object_name().map(ToOwned::to_owned))
        .collect::<HashSet<_>>();

    debug!(
        "these segments have already been uploaded: {:?}",
        uploaded_segments
    );

    for segno in from_segment..to_segment {
        if segno % SEGMENTS_PROGRESS_REPORT_INTERVAL == 0 {
            info!("copied all segments from {} until {}", from_segment, segno);
        }

        let segment_name = XLogFileName(PG_TLI, segno, wal_seg_size);
        if uploaded_segments.contains(&segment_name) {
            continue;
        }
        debug!("copying segment {}", segment_name);

        let from = remote_timeline_path(src_ttid)?.join(&segment_name);
        let to = remote_dst_path.join(&segment_name);

        storage.copy_object(&from, &to, &cancel).await?;
    }

    info!(
        "finished copying segments from {} until {}",
        from_segment, to_segment
    );
    Ok(())
}

/// Get the S3 (remote_storage) prefix path used for timeline files.
pub fn remote_timeline_path(ttid: &TenantTimelineId) -> Result<RemotePath> {
    RemotePath::new(&Utf8Path::new(&ttid.tenant_id.to_string()).join(ttid.timeline_id.to_string()))
}