Line data Source code
1 : use std::cmp::min;
2 : use std::collections::HashSet;
3 : use std::num::NonZeroU32;
4 : use std::pin::Pin;
5 : use std::sync::Arc;
6 : use std::time::Duration;
7 :
8 : use anyhow::{Context, Result};
9 : use camino::{Utf8Path, Utf8PathBuf};
10 : use futures::StreamExt;
11 : use futures::stream::FuturesOrdered;
12 : use postgres_ffi::v14::xlog_utils::XLogSegNoOffsetToRecPtr;
13 : use postgres_ffi::{PG_TLI, XLogFileName, XLogSegNo};
14 : use remote_storage::{
15 : DownloadOpts, GenericRemoteStorage, ListingMode, RemotePath, StorageMetadata,
16 : };
17 : use safekeeper_api::models::PeerInfo;
18 : use tokio::fs::File;
19 : use tokio::select;
20 : use tokio::sync::mpsc::{self, Receiver, Sender};
21 : use tokio::sync::watch;
22 : use tokio::task::JoinHandle;
23 : use tokio_util::sync::CancellationToken;
24 : use tracing::*;
25 : use utils::id::{NodeId, TenantTimelineId};
26 : use utils::lsn::Lsn;
27 : use utils::{backoff, pausable_failpoint};
28 :
29 : use crate::metrics::{BACKED_UP_SEGMENTS, BACKUP_ERRORS, WAL_BACKUP_TASKS};
30 : use crate::timeline::WalResidentTimeline;
31 : use crate::timeline_manager::{Manager, StateSnapshot};
32 : use crate::{SafeKeeperConf, WAL_BACKUP_RUNTIME};
33 :
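// Failed upload attempts are retried with an exponential backoff derived from these bounds (see `WalBackupTask::run`).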
34 : const UPLOAD_FAILURE_RETRY_MIN_MS: u64 = 10;
35 : const UPLOAD_FAILURE_RETRY_MAX_MS: u64 = 5000;
36 :
37 : /// Default buffer size when interfacing with [`tokio::fs::File`].
38 : const BUFFER_SIZE: usize = 32 * 1024;
39 :
40 : pub struct WalBackupTaskHandle {
41 : shutdown_tx: Sender<()>,
42 : handle: JoinHandle<()>,
43 : }
44 :
45 : impl WalBackupTaskHandle {
46 0 : pub(crate) async fn join(self) {
47 0 : if let Err(e) = self.handle.await {
48 0 : error!("WAL backup task panicked: {}", e);
49 0 : }
50 0 : }
51 : }
52 :
53 : /// Do we have anything to upload to S3, i.e. should safekeepers run backup activity?
54 34 : pub(crate) fn is_wal_backup_required(
55 34 : wal_seg_size: usize,
56 34 : num_computes: usize,
57 34 : state: &StateSnapshot,
58 34 : ) -> bool {
59 34 : num_computes > 0 ||
60 : // Currently only the whole segment is offloaded, so compare segment numbers.
61 26 : (state.commit_lsn.segment_number(wal_seg_size) > state.backup_lsn.segment_number(wal_seg_size))
62 34 : }
63 :
64 : /// Based on peer information, determine which safekeeper should offload; if it
65 : /// is me, start the (per-timeline) task unless it is already running. OTOH, if it
66 : /// is not me and the task is running, kill it.
67 0 : pub(crate) async fn update_task(
68 0 : mgr: &mut Manager,
69 0 : storage: Arc<GenericRemoteStorage>,
70 0 : need_backup: bool,
71 0 : state: &StateSnapshot,
72 0 : ) {
73 0 : let (offloader, election_dbg_str) =
74 0 : determine_offloader(&state.peers, state.backup_lsn, mgr.tli.ttid, &mgr.conf);
75 0 : let elected_me = Some(mgr.conf.my_id) == offloader;
76 :
77 0 : let should_task_run = need_backup && elected_me;
78 :
79 : // start or stop the task
80 0 : if should_task_run != (mgr.backup_task.is_some()) {
81 0 : if should_task_run {
82 0 : info!("elected for backup: {}", election_dbg_str);
83 :
84 0 : let (shutdown_tx, shutdown_rx) = mpsc::channel(1);
85 :
86 0 : let Ok(resident) = mgr.wal_resident_timeline() else {
87 0 : info!("Timeline shut down");
88 0 : return;
89 : };
90 :
91 0 : let async_task = backup_task_main(
92 0 : resident,
93 0 : storage,
94 0 : mgr.conf.backup_parallel_jobs,
95 0 : shutdown_rx,
96 0 : );
97 :
98 0 : let handle = if mgr.conf.current_thread_runtime {
99 0 : tokio::spawn(async_task)
100 : } else {
101 0 : WAL_BACKUP_RUNTIME.spawn(async_task)
102 : };
103 :
104 0 : mgr.backup_task = Some(WalBackupTaskHandle {
105 0 : shutdown_tx,
106 0 : handle,
107 0 : });
108 : } else {
109 0 : if !need_backup {
110 : // don't need backup at all
111 0 : info!("stepping down from backup, need_backup={}", need_backup);
112 : } else {
113 : // someone else has been elected
114 0 : info!("stepping down from backup: {}", election_dbg_str);
115 : }
116 0 : shut_down_task(&mut mgr.backup_task).await;
117 : }
118 0 : }
119 0 : }
120 :
121 0 : async fn shut_down_task(entry: &mut Option<WalBackupTaskHandle>) {
122 0 : if let Some(wb_handle) = entry.take() {
123 : // Tell the task to shut down. An error means the task exited earlier; that's ok.
124 0 : let _ = wb_handle.shutdown_tx.send(()).await;
125 : // Await the task itself. TODO: restart panicked tasks earlier.
126 0 : wb_handle.join().await;
127 0 : }
128 0 : }
129 :
130 : /// The goal is to ensure that normally only one safekeeper offloads. However,
131 : /// it is fine (and inevitable, as s3 doesn't provide CAS) that for some short
132 : /// time several of them do so as they PUT the same files. Also,
133 : /// - frequently changing the offloader would be bad;
134 : /// - electing seriously lagging safekeeper is undesirable;
135 : ///
136 : /// So we deterministically choose among the reasonably caught up candidates.
137 : /// TODO: take into account failed attempts to deal with hypothetical situation
138 : /// where s3 is unreachable only for some sks.
139 0 : fn determine_offloader(
140 0 : alive_peers: &[PeerInfo],
141 0 : wal_backup_lsn: Lsn,
142 0 : ttid: TenantTimelineId,
143 0 : conf: &SafeKeeperConf,
144 0 : ) -> (Option<NodeId>, String) {
145 0 : // TODO: remove this once we fill newly joined safekeepers since backup_lsn.
146 0 : let capable_peers = alive_peers
147 0 : .iter()
148 0 : .filter(|p| p.local_start_lsn <= wal_backup_lsn);
149 0 : match capable_peers.clone().map(|p| p.commit_lsn).max() {
150 0 : None => (None, "no connected peers to elect from".to_string()),
151 0 : Some(max_commit_lsn) => {
152 0 : let threshold = max_commit_lsn
153 0 : .checked_sub(conf.max_offloader_lag_bytes)
154 0 : .unwrap_or(Lsn(0));
155 0 : let mut caughtup_peers = capable_peers
156 0 : .clone()
157 0 : .filter(|p| p.commit_lsn >= threshold)
158 0 : .collect::<Vec<_>>();
159 0 : caughtup_peers.sort_by(|p1, p2| p1.sk_id.cmp(&p2.sk_id));
160 0 :
161 0 : // To distribute the load, shift by timeline_id.
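// (timeline_id % caughtup_peers.len() deterministically picks one of the caught-up peers per timeline)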
162 0 : let offloader = caughtup_peers
163 0 : [(u128::from(ttid.timeline_id) % caughtup_peers.len() as u128) as usize]
164 0 : .sk_id;
165 0 :
166 0 : let mut capable_peers_dbg = capable_peers
167 0 : .map(|p| (p.sk_id, p.commit_lsn))
168 0 : .collect::<Vec<_>>();
169 0 : capable_peers_dbg.sort_by(|p1, p2| p1.0.cmp(&p2.0));
170 0 : (
171 0 : Some(offloader),
172 0 : format!(
173 0 : "elected {} among {:?} peers, with {} of them being caughtup",
174 0 : offloader,
175 0 : capable_peers_dbg,
176 0 : caughtup_peers.len()
177 0 : ),
178 0 : )
179 : }
180 : }
181 0 : }
182 :
183 : pub struct WalBackup {
184 : storage: Option<Arc<GenericRemoteStorage>>,
185 : }
186 :
187 : impl WalBackup {
188 : /// Create a new WalBackup instance.
189 5 : pub async fn new(conf: &SafeKeeperConf) -> Result<Self> {
190 5 : if !conf.wal_backup_enabled {
191 0 : return Ok(Self { storage: None });
192 5 : }
193 5 :
194 5 : match conf.remote_storage.as_ref() {
195 0 : Some(config) => {
196 0 : let storage = GenericRemoteStorage::from_config(config).await?;
197 0 : Ok(Self {
198 0 : storage: Some(Arc::new(storage)),
199 0 : })
200 : }
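// No remote storage configured: WAL backup is effectively disabled and `get_storage` returns `None`.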
201 5 : None => Ok(Self { storage: None }),
202 : }
203 5 : }
204 :
205 68 : pub fn get_storage(&self) -> Option<Arc<GenericRemoteStorage>> {
206 68 : self.storage.clone()
207 68 : }
208 : }
209 :
210 : struct WalBackupTask {
211 : timeline: WalResidentTimeline,
212 : timeline_dir: Utf8PathBuf,
213 : wal_seg_size: usize,
214 : parallel_jobs: usize,
215 : commit_lsn_watch_rx: watch::Receiver<Lsn>,
216 : storage: Arc<GenericRemoteStorage>,
217 : }
218 :
219 : /// Offload single timeline.
220 : #[instrument(name = "wal_backup", skip_all, fields(ttid = %tli.ttid))]
221 : async fn backup_task_main(
222 : tli: WalResidentTimeline,
223 : storage: Arc<GenericRemoteStorage>,
224 : parallel_jobs: usize,
225 : mut shutdown_rx: Receiver<()>,
226 : ) {
227 : let _guard = WAL_BACKUP_TASKS.guard();
228 : info!("started");
229 :
230 : let cancel = tli.tli.cancel.clone();
231 : let mut wb = WalBackupTask {
232 : wal_seg_size: tli.get_wal_seg_size().await,
233 : commit_lsn_watch_rx: tli.get_commit_lsn_watch_rx(),
234 : timeline_dir: tli.get_timeline_dir(),
235 : timeline: tli,
236 : parallel_jobs,
237 : storage,
238 : };
239 :
240 : // The task is spun up only when wal_seg_size is already initialized.
241 : assert!(wb.wal_seg_size > 0);
242 :
243 : let mut canceled = false;
244 : select! {
245 : _ = wb.run() => {}
246 : _ = shutdown_rx.recv() => {
247 : canceled = true;
248 : },
249 : _ = cancel.cancelled() => {
250 : canceled = true;
251 : }
252 : }
253 : info!("task {}", if canceled { "canceled" } else { "terminated" });
254 : }
255 :
256 : impl WalBackupTask {
257 : /// This function must be called from a select! that also respects self.timeline's
258 : /// cancellation token. This is done in [`backup_task_main`].
259 : ///
260 : /// The future returned by this function is safe to drop at any time because it
261 : /// does not write to local disk.
262 0 : async fn run(&mut self) {
263 0 : let mut backup_lsn = Lsn(0);
264 0 :
265 0 : let mut retry_attempt = 0u32;
266 : // offload loop
267 0 : while !self.timeline.cancel.is_cancelled() {
268 0 : if retry_attempt == 0 {
269 : // wait for new WAL to arrive
270 0 : if let Err(e) = self.commit_lsn_watch_rx.changed().await {
271 : // should never happen, as we hold an Arc to the timeline and the transmitter's
272 : // lifetime is within the Timeline's
273 0 : error!("commit_lsn watch shut down: {:?}", e);
274 0 : return;
275 0 : };
276 : } else {
277 : // or just sleep if we errored previously
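// The delay is UPLOAD_FAILURE_RETRY_MIN_MS << retry_attempt, capped at UPLOAD_FAILURE_RETRY_MAX_MS.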
278 0 : let mut retry_delay = UPLOAD_FAILURE_RETRY_MAX_MS;
279 0 : if let Some(backoff_delay) = UPLOAD_FAILURE_RETRY_MIN_MS.checked_shl(retry_attempt)
280 0 : {
281 0 : retry_delay = min(retry_delay, backoff_delay);
282 0 : }
283 0 : tokio::time::sleep(Duration::from_millis(retry_delay)).await;
284 : }
285 :
286 0 : let commit_lsn = *self.commit_lsn_watch_rx.borrow();
287 0 :
288 0 : // Note that backup_lsn can be higher than commit_lsn if we
289 0 : // don't have much local WAL and others already uploaded
290 0 : // segments we don't even have.
291 0 : if backup_lsn.segment_number(self.wal_seg_size)
292 0 : >= commit_lsn.segment_number(self.wal_seg_size)
293 : {
294 0 : retry_attempt = 0;
295 0 : continue; /* nothing to do, common case as we wake up on every commit_lsn bump */
296 0 : }
297 0 : // Perhaps peers advanced the position, check shmem value.
298 0 : backup_lsn = self.timeline.get_wal_backup_lsn().await;
299 0 : if backup_lsn.segment_number(self.wal_seg_size)
300 0 : >= commit_lsn.segment_number(self.wal_seg_size)
301 : {
302 0 : retry_attempt = 0;
303 0 : continue;
304 0 : }
305 0 :
306 0 : match backup_lsn_range(
307 0 : &self.timeline,
308 0 : self.storage.clone(),
309 0 : &mut backup_lsn,
310 0 : commit_lsn,
311 0 : self.wal_seg_size,
312 0 : &self.timeline_dir,
313 0 : self.parallel_jobs,
314 0 : )
315 0 : .await
316 : {
317 0 : Ok(()) => {
318 0 : retry_attempt = 0;
319 0 : }
320 0 : Err(e) => {
321 0 : // We might have managed to upload some segment even though
322 0 : // some later in the range failed, so log backup_lsn
323 0 : // separately.
324 0 : error!(
325 0 : "failed while offloading range {}-{}, backup_lsn {}: {:?}",
326 : backup_lsn, commit_lsn, backup_lsn, e
327 : );
328 :
329 0 : retry_attempt = retry_attempt.saturating_add(1);
330 : }
331 : }
332 : }
333 0 : }
334 : }
335 :
336 0 : async fn backup_lsn_range(
337 0 : timeline: &WalResidentTimeline,
338 0 : storage: Arc<GenericRemoteStorage>,
339 0 : backup_lsn: &mut Lsn,
340 0 : end_lsn: Lsn,
341 0 : wal_seg_size: usize,
342 0 : timeline_dir: &Utf8Path,
343 0 : parallel_jobs: usize,
344 0 : ) -> Result<()> {
345 0 : if parallel_jobs < 1 {
346 0 : anyhow::bail!("parallel_jobs must be >= 1");
347 0 : }
348 0 :
349 0 : let remote_timeline_path = &timeline.remote_path;
350 0 : let start_lsn = *backup_lsn;
351 0 : let segments = get_segments(start_lsn, end_lsn, wal_seg_size);
352 0 :
353 0 : info!(
354 0 : "offloading segnos {:?} of range [{}-{})",
355 0 : segments.iter().map(|&s| s.seg_no).collect::<Vec<_>>(),
356 : start_lsn,
357 : end_lsn,
358 : );
359 :
360 : // Pool of concurrent upload tasks. We use `FuturesOrdered` to
361 : // preserve order of uploads, and update `backup_lsn` only after
362 : // all previous uploads are finished.
363 0 : let mut uploads = FuturesOrdered::new();
364 0 : let mut iter = segments.iter();
365 :
366 : loop {
367 0 : let added_task = match iter.next() {
368 0 : Some(s) => {
369 0 : uploads.push_back(backup_single_segment(
370 0 : &storage,
371 0 : s,
372 0 : timeline_dir,
373 0 : remote_timeline_path,
374 0 : ));
375 0 : true
376 : }
377 0 : None => false,
378 : };
379 :
380 : // Wait for the next segment to upload if we don't have any more segments,
381 : // or if we have too many concurrent uploads.
382 0 : if !added_task || uploads.len() >= parallel_jobs {
383 0 : let next = uploads.next().await;
384 0 : if let Some(res) = next {
385 : // next segment uploaded
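// FuturesOrdered yields results in submission order, so this segment's end_lsn is the new contiguous backup boundary.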
386 0 : let segment = res?;
387 0 : let new_backup_lsn = segment.end_lsn;
388 0 : timeline
389 0 : .set_wal_backup_lsn(new_backup_lsn)
390 0 : .await
391 0 : .context("setting wal_backup_lsn")?;
392 0 : *backup_lsn = new_backup_lsn;
393 : } else {
394 : // no more segments to upload
395 0 : break;
396 : }
397 0 : }
398 : }
399 :
400 0 : info!(
401 0 : "offloaded segnos {:?} of range [{}-{})",
402 0 : segments.iter().map(|&s| s.seg_no).collect::<Vec<_>>(),
403 : start_lsn,
404 : end_lsn,
405 : );
406 0 : Ok(())
407 0 : }
408 :
409 0 : async fn backup_single_segment(
410 0 : storage: &GenericRemoteStorage,
411 0 : seg: &Segment,
412 0 : timeline_dir: &Utf8Path,
413 0 : remote_timeline_path: &RemotePath,
414 0 : ) -> Result<Segment> {
415 0 : let segment_file_path = seg.file_path(timeline_dir)?;
416 0 : let remote_segment_path = seg.remote_path(remote_timeline_path);
417 :
418 0 : let res = backup_object(
419 0 : storage,
420 0 : &segment_file_path,
421 0 : &remote_segment_path,
422 0 : seg.size(),
423 0 : )
424 0 : .await;
425 0 : if res.is_ok() {
426 0 : BACKED_UP_SEGMENTS.inc();
427 0 : } else {
428 0 : BACKUP_ERRORS.inc();
429 0 : }
430 0 : res?;
431 0 : debug!("Backup of {} done", segment_file_path);
432 :
433 0 : Ok(*seg)
434 0 : }
435 :
436 : #[derive(Debug, Copy, Clone)]
437 : pub struct Segment {
438 : seg_no: XLogSegNo,
439 : start_lsn: Lsn,
440 : end_lsn: Lsn,
441 : }
442 :
443 : impl Segment {
444 0 : pub fn new(seg_no: u64, start_lsn: Lsn, end_lsn: Lsn) -> Self {
445 0 : Self {
446 0 : seg_no,
447 0 : start_lsn,
448 0 : end_lsn,
449 0 : }
450 0 : }
451 :
452 0 : pub fn object_name(self) -> String {
453 0 : XLogFileName(PG_TLI, self.seg_no, self.size())
454 0 : }
455 :
456 0 : pub fn file_path(self, timeline_dir: &Utf8Path) -> Result<Utf8PathBuf> {
457 0 : Ok(timeline_dir.join(self.object_name()))
458 0 : }
459 :
460 0 : pub fn remote_path(self, remote_timeline_path: &RemotePath) -> RemotePath {
461 0 : remote_timeline_path.join(self.object_name())
462 0 : }
463 :
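/// Segment length in bytes; for the whole segments produced by `get_segments` this equals the WAL segment size.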
464 0 : pub fn size(self) -> usize {
465 0 : (u64::from(self.end_lsn) - u64::from(self.start_lsn)) as usize
466 0 : }
467 : }
468 :
469 0 : fn get_segments(start: Lsn, end: Lsn, seg_size: usize) -> Vec<Segment> {
470 0 : let first_seg = start.segment_number(seg_size);
471 0 : let last_seg = end.segment_number(seg_size);
472 0 :
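// The exclusive upper bound means only fully written segments below `end` are offloaded.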
473 0 : let res: Vec<Segment> = (first_seg..last_seg)
474 0 : .map(|s| {
475 0 : let start_lsn = XLogSegNoOffsetToRecPtr(s, 0, seg_size);
476 0 : let end_lsn = XLogSegNoOffsetToRecPtr(s + 1, 0, seg_size);
477 0 : Segment::new(s, Lsn::from(start_lsn), Lsn::from(end_lsn))
478 0 : })
479 0 : .collect();
480 0 : res
481 0 : }
482 :
483 0 : async fn backup_object(
484 0 : storage: &GenericRemoteStorage,
485 0 : source_file: &Utf8Path,
486 0 : target_file: &RemotePath,
487 0 : size: usize,
488 0 : ) -> Result<()> {
489 0 : let file = File::open(&source_file)
490 0 : .await
491 0 : .with_context(|| format!("Failed to open file {source_file:?} for wal backup"))?;
492 :
493 0 : let file = tokio_util::io::ReaderStream::with_capacity(file, BUFFER_SIZE);
494 0 :
495 0 : let cancel = CancellationToken::new();
496 0 :
497 0 : storage
498 0 : .upload_storage_object(file, size, target_file, &cancel)
499 0 : .await
500 0 : }
501 :
502 0 : pub(crate) async fn backup_partial_segment(
503 0 : storage: &GenericRemoteStorage,
504 0 : source_file: &Utf8Path,
505 0 : target_file: &RemotePath,
506 0 : size: usize,
507 0 : ) -> Result<()> {
508 0 : let file = File::open(&source_file)
509 0 : .await
510 0 : .with_context(|| format!("Failed to open file {source_file:?} for wal backup"))?;
511 :
512 : // Limit the read to only the first `size` bytes of the file.
513 0 : let limited_file = tokio::io::AsyncReadExt::take(file, size as u64);
514 0 :
515 0 : let file = tokio_util::io::ReaderStream::with_capacity(limited_file, BUFFER_SIZE);
516 0 :
517 0 : let cancel = CancellationToken::new();
518 0 :
519 0 : storage
520 0 : .upload(
521 0 : file,
522 0 : size,
523 0 : target_file,
524 0 : Some(StorageMetadata::from([("sk_type", "partial_segment")])),
525 0 : &cancel,
526 0 : )
527 0 : .await
528 0 : }
529 :
530 0 : pub(crate) async fn copy_partial_segment(
531 0 : storage: &GenericRemoteStorage,
532 0 : source: &RemotePath,
533 0 : destination: &RemotePath,
534 0 : ) -> Result<()> {
535 0 : let cancel = CancellationToken::new();
536 0 :
537 0 : storage.copy_object(source, destination, &cancel).await
538 0 : }
539 :
540 0 : pub async fn read_object(
541 0 : storage: &GenericRemoteStorage,
542 0 : file_path: &RemotePath,
543 0 : offset: u64,
544 0 : ) -> anyhow::Result<Pin<Box<dyn tokio::io::AsyncRead + Send + Sync>>> {
545 0 : info!("segment download about to start from remote path {file_path:?} at offset {offset}");
546 :
547 0 : let cancel = CancellationToken::new();
548 0 :
549 0 : let opts = DownloadOpts {
550 0 : byte_start: std::ops::Bound::Included(offset),
551 0 : ..Default::default()
552 0 : };
553 0 : let download = storage
554 0 : .download(file_path, &opts, &cancel)
555 0 : .await
556 0 : .with_context(|| {
557 0 : format!("Failed to open WAL segment download stream for remote path {file_path:?}")
558 0 : })?;
559 :
560 0 : let reader = tokio_util::io::StreamReader::new(download.download_stream);
561 0 :
562 0 : let reader = tokio::io::BufReader::with_capacity(BUFFER_SIZE, reader);
563 0 :
564 0 : Ok(Box::pin(reader))
565 0 : }
566 :
567 : /// Delete WAL files for the given timeline. Remote storage must be configured
568 : /// when called.
569 0 : pub async fn delete_timeline(
570 0 : storage: &GenericRemoteStorage,
571 0 : ttid: &TenantTimelineId,
572 0 : ) -> Result<()> {
573 0 : let remote_path = remote_timeline_path(ttid)?;
574 :
575 : // see DEFAULT_MAX_KEYS_PER_LIST_RESPONSE
576 : // const Option unwrap is not stable, otherwise it would be const.
577 0 : let batch_size: NonZeroU32 = NonZeroU32::new(1000).unwrap();
578 0 :
579 0 : // A backoff::retry is used here for two reasons:
580 0 : // - To provide a backoff rather than busy-polling the API on errors
581 0 : // - To absorb transient 429/503 conditions without hitting our error
582 0 : // logging path for issues deleting objects.
583 0 : //
584 0 : // Note: listing segments might take a long time if there are many of them.
585 0 : // We don't currently have HTTP request timeout/cancellation, but if/once
586 0 : // we do, listing should get a streaming interface to make progress.
587 0 :
588 0 : pausable_failpoint!("sk-delete-timeline-remote-pause");
589 :
590 0 : fail::fail_point!("sk-delete-timeline-remote", |_| {
591 0 : Err(anyhow::anyhow!("failpoint: sk-delete-timeline-remote"))
592 0 : });
593 :
594 0 : let cancel = CancellationToken::new(); // not really used
595 0 : backoff::retry(
596 0 : || async {
597 : // Do list-delete in batch_size batches to make progress even if there are a lot of files.
598 : // Alternatively we could make the remote storage list return an iterator, but that is more
599 : // complicated, and I'm not sure deleting while iterating works as expected in s3.
600 : loop {
601 0 : let files = storage
602 0 : .list(
603 0 : Some(&remote_path),
604 0 : ListingMode::NoDelimiter,
605 0 : Some(batch_size),
606 0 : &cancel,
607 0 : )
608 0 : .await?
609 : .keys
610 0 : .into_iter()
611 0 : .map(|o| o.key)
612 0 : .collect::<Vec<_>>();
613 0 : if files.is_empty() {
614 0 : return Ok(()); // done
615 0 : }
616 0 : // (at least) s3 results are sorted, so we can log min/max:
617 0 : // "List results are always returned in UTF-8 binary order."
618 0 : info!(
619 0 : "deleting batch of {} WAL segments [{}-{}]",
620 0 : files.len(),
621 0 : files.first().unwrap().object_name().unwrap_or(""),
622 0 : files.last().unwrap().object_name().unwrap_or("")
623 : );
624 0 : storage.delete_objects(&files, &cancel).await?;
625 : }
626 0 : },
627 0 : // consider TimeoutOrCancel::caused_by_cancel when using cancellation
628 0 : |_| false,
629 0 : 3,
630 0 : 10,
631 0 : "executing WAL segments deletion batch",
632 0 : &cancel,
633 0 : )
634 0 : .await
635 0 : .ok_or_else(|| anyhow::anyhow!("canceled"))
636 0 : .and_then(|x| x)?;
637 :
638 0 : Ok(())
639 0 : }
640 :
641 : /// Used by wal_backup_partial.
642 0 : pub async fn delete_objects(storage: &GenericRemoteStorage, paths: &[RemotePath]) -> Result<()> {
643 0 : let cancel = CancellationToken::new(); // not really used
644 0 : storage.delete_objects(paths, &cancel).await
645 0 : }
646 :
647 : /// Copy segments from one timeline to another. Used in copy_timeline.
648 0 : pub async fn copy_s3_segments(
649 0 : storage: &GenericRemoteStorage,
650 0 : wal_seg_size: usize,
651 0 : src_ttid: &TenantTimelineId,
652 0 : dst_ttid: &TenantTimelineId,
653 0 : from_segment: XLogSegNo,
654 0 : to_segment: XLogSegNo,
655 0 : ) -> Result<()> {
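// Log copy progress every this many segments.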
656 : const SEGMENTS_PROGRESS_REPORT_INTERVAL: u64 = 1024;
657 :
658 0 : let remote_dst_path = remote_timeline_path(dst_ttid)?;
659 :
660 0 : let cancel = CancellationToken::new();
661 :
662 0 : let files = storage
663 0 : .list(
664 0 : Some(&remote_dst_path),
665 0 : ListingMode::NoDelimiter,
666 0 : None,
667 0 : &cancel,
668 0 : )
669 0 : .await?
670 : .keys;
671 :
672 0 : let uploaded_segments = &files
673 0 : .iter()
674 0 : .filter_map(|o| o.key.object_name().map(ToOwned::to_owned))
675 0 : .collect::<HashSet<_>>();
676 0 :
677 0 : debug!(
678 0 : "these segments have already been uploaded: {:?}",
679 : uploaded_segments
680 : );
681 :
682 0 : for segno in from_segment..to_segment {
683 0 : if segno % SEGMENTS_PROGRESS_REPORT_INTERVAL == 0 {
684 0 : info!("copied all segments from {} until {}", from_segment, segno);
685 0 : }
686 :
687 0 : let segment_name = XLogFileName(PG_TLI, segno, wal_seg_size);
688 0 : if uploaded_segments.contains(&segment_name) {
689 0 : continue;
690 0 : }
691 0 : debug!("copying segment {}", segment_name);
692 :
693 0 : let from = remote_timeline_path(src_ttid)?.join(&segment_name);
694 0 : let to = remote_dst_path.join(&segment_name);
695 0 :
696 0 : storage.copy_object(&from, &to, &cancel).await?;
697 : }
698 :
699 0 : info!(
700 0 : "finished copying segments from {} until {}",
701 : from_segment, to_segment
702 : );
703 0 : Ok(())
704 0 : }
705 :
706 : /// Get S3 (remote_storage) prefix path used for timeline files.
707 14 : pub fn remote_timeline_path(ttid: &TenantTimelineId) -> Result<RemotePath> {
708 14 : RemotePath::new(&Utf8Path::new(&ttid.tenant_id.to_string()).join(ttid.timeline_id.to_string()))
709 14 : }
|