use anyhow::{anyhow, bail, Context, Result};
use bytes::Bytes;
use camino::Utf8PathBuf;
use chrono::{DateTime, Utc};
use futures::{SinkExt, StreamExt, TryStreamExt};
use postgres_ffi::{XLogFileName, XLogSegNo, PG_TLI};
use safekeeper_api::{
    models::{PullTimelineRequest, PullTimelineResponse, TimelineStatus},
    Term,
};
use safekeeper_client::mgmt_api;
use safekeeper_client::mgmt_api::Client;
use serde::Deserialize;
use std::{
    cmp::min,
    io::{self, ErrorKind},
    sync::Arc,
};
use tokio::{fs::OpenOptions, io::AsyncWrite, sync::mpsc, task};
use tokio_tar::{Archive, Builder, Header};
use tokio_util::{
    io::{CopyToBytes, SinkWriter},
    sync::PollSender,
};
use tracing::{error, info, instrument};

use crate::{
    control_file::CONTROL_FILE_NAME,
    debug_dump,
    state::{EvictionState, TimelinePersistentState},
    timeline::{Timeline, WalResidentTimeline},
    timelines_global_map::{create_temp_timeline_dir, validate_temp_timeline},
    wal_backup,
    wal_storage::open_wal_file,
    GlobalTimelines,
};
use utils::{
    crashsafe::fsync_async_opt,
    id::{NodeId, TenantTimelineId},
    logging::SecretString,
    lsn::Lsn,
    pausable_failpoint,
};

/// Stream tar archive of timeline to tx.
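///
/// A hypothetical caller sketch (hedged: `hyper` 0.14's `Body::wrap_stream` and
/// `tokio_stream` are assumed here, not used by this module; the block is marked
/// `ignore` for rustdoc):
///
/// ```ignore
/// let (tx, rx) = mpsc::channel(1);
/// tokio::spawn(stream_snapshot(tli, source, destination, tx));
/// let body = hyper::Body::wrap_stream(tokio_stream::wrappers::ReceiverStream::new(rx));
/// ```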
#[instrument(name = "snapshot", skip_all, fields(ttid = %tli.ttid))]
pub async fn stream_snapshot(
    tli: Arc<Timeline>,
    source: NodeId,
    destination: NodeId,
    tx: mpsc::Sender<Result<Bytes>>,
) {
    match tli.try_wal_residence_guard().await {
        Err(e) => {
            tx.send(Err(anyhow!("Error checking residence: {:#}", e)))
                .await
                .ok();
        }
        Ok(maybe_resident_tli) => {
            if let Err(e) = match maybe_resident_tli {
                Some(resident_tli) => {
                    stream_snapshot_resident_guts(resident_tli, source, destination, tx.clone())
                        .await
                }
                None => stream_snapshot_offloaded_guts(tli, source, destination, tx.clone()).await,
            } {
                // The error type/contents don't matter, as they can't reach the client
                // (hyper likely doesn't do anything with them), but the HTTP stream will
                // be prematurely terminated. It would be nice to try to send the error
                // in trailers though.
                tx.send(Err(anyhow!("snapshot failed"))).await.ok();
                error!("snapshot failed: {:#}", e);
            }
        }
    }
}

/// State needed while streaming the snapshot.
pub struct SnapshotContext {
    pub from_segno: XLogSegNo, // inclusive
    pub upto_segno: XLogSegNo, // inclusive
    pub term: Term,
    pub last_log_term: Term,
    pub flush_lsn: Lsn,
    pub wal_seg_size: usize,
    // used to remove the WAL hold-off in Drop.
    pub tli: WalResidentTimeline,
}

impl Drop for SnapshotContext {
    fn drop(&mut self) {
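        // Drop can't be async, so spawn a task to take the async state lock
        // and lift the WAL removal hold there.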
        let tli = self.tli.clone();
        task::spawn(async move {
            let mut shared_state = tli.write_shared_state().await;
            shared_state.wal_removal_on_hold = false;
        });
    }
}

/// Build a tokio_tar stream that sends encoded bytes into a Bytes channel.
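///
/// A minimal usage sketch (hypothetical: the file name and payload are made up,
/// and the block is marked `ignore` for rustdoc). The producer is spawned because
/// the channel applies backpressure, so it must be drained concurrently:
///
/// ```ignore
/// let (tx, mut rx) = mpsc::channel::<Result<Bytes>>(1);
/// tokio::spawn(async move {
///     let mut ar = prepare_tar_stream(tx);
///     let mut header = Header::new_gnu();
///     header.set_size(4);
///     ar.append_data(&mut header, "hello.txt", &b"data"[..]).await?;
///     ar.finish().await?;
///     anyhow::Ok(())
/// });
/// while let Some(chunk) = rx.recv().await {
///     // each `chunk?` is a tar-encoded Bytes block
/// }
/// ```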
fn prepare_tar_stream(
    tx: mpsc::Sender<Result<Bytes>>,
) -> tokio_tar::Builder<impl AsyncWrite + Unpin + Send> {
    // tokio-tar wants a Write implementor, but we have an mpsc tx of Result<Bytes>;
    // use SinkWriter as a Write impl. That is,
    // - create a Sink from the tx. It returns PollSendError if the channel is closed.
    let sink = PollSender::new(tx);
    // - SinkWriter needs the sink error to be an io one, so map it.
    let sink_io_err = sink.sink_map_err(|_| io::Error::from(ErrorKind::BrokenPipe));
    // - SinkWriter wants the sink type to be just Bytes, not Result<Bytes>, so map
    //   it with with(). Note that with() accepts an async function (which we don't
    //   need) and allows the map to fail (which we don't need either); hence the
    //   two Oks.
    let oksink = sink_io_err.with(|b: Bytes| async { io::Result::Ok(Result::Ok(b)) });
    // - SinkWriter (not surprisingly) wants a sink of &[u8], not Bytes, so wrap it
    //   into CopyToBytes. This is a data copy.
    let copy_to_bytes = CopyToBytes::new(oksink);
    let writer = SinkWriter::new(copy_to_bytes);
    let pinned_writer = Box::pin(writer);

    // Note that the tokio_tar append_* funcs use tokio::io::copy with an 8KB
    // buffer, which is also likely suboptimal.
    Builder::new_non_terminated(pinned_writer)
}

/// Implementation of snapshot for an offloaded timeline: only reads the control file.
pub(crate) async fn stream_snapshot_offloaded_guts(
    tli: Arc<Timeline>,
    source: NodeId,
    destination: NodeId,
    tx: mpsc::Sender<Result<Bytes>>,
) -> Result<()> {
    let mut ar = prepare_tar_stream(tx);

    tli.snapshot_offloaded(&mut ar, source, destination).await?;

    ar.finish().await?;

    Ok(())
}

/// Implementation of snapshot for a timeline which is resident (includes some segment data).
pub async fn stream_snapshot_resident_guts(
    tli: WalResidentTimeline,
    source: NodeId,
    destination: NodeId,
    tx: mpsc::Sender<Result<Bytes>>,
) -> Result<()> {
    let mut ar = prepare_tar_stream(tx);

    let bctx = tli.start_snapshot(&mut ar, source, destination).await?;
    pausable_failpoint!("sk-snapshot-after-list-pausable");

    let tli_dir = tli.get_timeline_dir();
    info!(
        "sending {} segments [{:#X}-{:#X}], term={}, last_log_term={}, flush_lsn={}",
        bctx.upto_segno - bctx.from_segno + 1,
        bctx.from_segno,
        bctx.upto_segno,
        bctx.term,
        bctx.last_log_term,
        bctx.flush_lsn,
    );
    for segno in bctx.from_segno..=bctx.upto_segno {
        let (mut sf, is_partial) = open_wal_file(&tli_dir, segno, bctx.wal_seg_size).await?;
        let mut wal_file_name = XLogFileName(PG_TLI, segno, bctx.wal_seg_size);
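        // The last, still-growing segment is stored on disk with a ".partial"
        // suffix; keep that name in the archive so the receiver recognizes it.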
        if is_partial {
            wal_file_name.push_str(".partial");
        }
        ar.append_file(&wal_file_name, &mut sf).await?;
    }

    // Do the term check before ar.finish so that the archive is left corrupted
    // in case of a term change. The client shouldn't ignore an abrupt stream
    // end anyway, but this makes sure.
    tli.finish_snapshot(&bctx).await?;

    ar.finish().await?;

    Ok(())
}

impl Timeline {
    /// Simple snapshot for an offloaded timeline: we only upload a renamed partial
    /// segment and pass a modified control file into the provided tar stream (nothing
    /// is done with data segments on disk, since we are offloaded and there aren't any).
    async fn snapshot_offloaded<W: AsyncWrite + Unpin + Send>(
        self: &Arc<Timeline>,
        ar: &mut tokio_tar::Builder<W>,
        source: NodeId,
        destination: NodeId,
    ) -> Result<()> {
        // Take an initial copy of the control file, then release the state lock.
        let mut control_file = {
            let shared_state = self.write_shared_state().await;

            let control_file = TimelinePersistentState::clone(shared_state.sk.state());

            // Rare race: we got unevicted between entering the function and reading
            // the control file. We error out and let the API caller retry.
            if !matches!(control_file.eviction_state, EvictionState::Offloaded(_)) {
                bail!("Timeline was un-evicted during snapshot, please retry");
            }

            control_file
        };

        // Modify the partial segment of the in-memory copy of the control file to
        // point to the destination safekeeper.
        let replace = control_file
            .partial_backup
            .replace_uploaded_segment(source, destination)?;

        let Some(replace) = replace else {
            // In Manager::ready_for_eviction, we do not permit eviction unless the
            // timeline has a partial segment, so it is unexpected for one to be
            // missing here.
            anyhow::bail!("Timeline has no partial segment, cannot generate snapshot");
        };

        tracing::info!("Replacing uploaded partial segment in in-mem control file: {replace:?}");

        // Optimistically try to copy the partial segment to the destination's path: this
        // can fail if the timeline was un-evicted and modified in the background.
        let remote_timeline_path = &self.remote_path;
        wal_backup::copy_partial_segment(
            &replace.previous.remote_path(remote_timeline_path),
            &replace.current.remote_path(remote_timeline_path),
        )
        .await?;

        // Since the S3 copy succeeded with the path given in our control file snapshot, and
        // we are sending that snapshot in our response, we are giving the caller a consistent
        // snapshot even if our local Timeline was unevicted or otherwise modified in the meantime.
        let buf = control_file
            .write_to_buf()
            .with_context(|| "failed to serialize control store")?;
        let mut header = Header::new_gnu();
        header.set_size(buf.len().try_into().expect("never breaches u64"));
        ar.append_data(&mut header, CONTROL_FILE_NAME, buf.as_slice())
            .await
            .with_context(|| "failed to append to archive")?;

        Ok(())
    }
}

impl WalResidentTimeline {
    /// Start streaming tar archive with timeline:
    /// 1) stream control file under lock;
    /// 2) hold off WAL removal;
    /// 3) collect SnapshotContext to understand which WAL segments should be
    ///    streamed.
    ///
    /// Snapshot streams data up to flush_lsn. To make this safe, we must check
    /// that the term doesn't change during the procedure, or we risk sending a
    /// mix of WAL from different histories. The term is remembered in the
    /// SnapshotContext and checked in finish_snapshot. Note that in the last
    /// segment some WAL higher than the flush_lsn set here might be streamed;
    /// that's fine as long as the term doesn't change.
    ///
    /// Alternatively we could send only up to commit_lsn to get some valid
    /// state which would later be recovered by compute; in this case the term
    /// check is not needed, but we likely don't want that, as there might be no
    /// compute which could perform the recovery.
    ///
    /// When the returned SnapshotContext is dropped, the WAL hold is removed.
    async fn start_snapshot<W: AsyncWrite + Unpin + Send>(
        &self,
        ar: &mut tokio_tar::Builder<W>,
        source: NodeId,
        destination: NodeId,
    ) -> Result<SnapshotContext> {
        let mut shared_state = self.write_shared_state().await;
        let wal_seg_size = shared_state.get_wal_seg_size();

        let mut control_store = TimelinePersistentState::clone(shared_state.sk.state());
        // Modify the partial segment of the in-memory copy of the control file to
        // point to the destination safekeeper.
        let replace = control_store
            .partial_backup
            .replace_uploaded_segment(source, destination)?;

        if let Some(replace) = replace {
            // The deserialized control file has an uploaded partial. We upload a copy
            // of it to object storage for the destination safekeeper and send an updated
            // control file in the snapshot.
            tracing::info!(
                "Replacing uploaded partial segment in in-mem control file: {replace:?}"
            );

            let remote_timeline_path = &self.tli.remote_path;
            wal_backup::copy_partial_segment(
                &replace.previous.remote_path(remote_timeline_path),
                &replace.current.remote_path(remote_timeline_path),
            )
            .await?;
        }

        let buf = control_store
            .write_to_buf()
            .with_context(|| "failed to serialize control store")?;
        let mut header = Header::new_gnu();
        header.set_size(buf.len().try_into().expect("never breaches u64"));
        ar.append_data(&mut header, CONTROL_FILE_NAME, buf.as_slice())
            .await
            .with_context(|| "failed to append to archive")?;

        // We need to stream from the oldest segment someone (s3 or pageserver)
        // still needs. This duplicates the calc_horizon_lsn logic.
        //
        // We know that WAL wasn't removed up to this point because it cannot be
        // removed further than `backup_lsn`. Since we're holding the shared_state
        // lock and setting `wal_removal_on_hold` later, it is guaranteed that WAL
        // won't be removed until we're done.
        let from_lsn = min(
            shared_state.sk.state().remote_consistent_lsn,
            shared_state.sk.state().backup_lsn,
        );
        if from_lsn == Lsn::INVALID {
            // This is possible if the snapshot is called before handling the first
            // elected message.
            bail!("snapshot is called on uninitialized timeline");
        }
        let from_segno = from_lsn.segment_number(wal_seg_size);
        let term = shared_state.sk.state().acceptor_state.term;
        let last_log_term = shared_state.sk.last_log_term();
        let flush_lsn = shared_state.sk.flush_lsn();
        let upto_segno = flush_lsn.segment_number(wal_seg_size);
        // Have some limit on the max number of segments as a sanity check.
        const MAX_ALLOWED_SEGS: u64 = 1000;
        let num_segs = upto_segno - from_segno + 1;
        if num_segs > MAX_ALLOWED_SEGS {
            bail!(
                "snapshot is called on timeline with {} segments, but the limit is {}",
                num_segs,
                MAX_ALLOWED_SEGS
            );
        }

        // Prevent WAL removal while we're streaming data.
        //
        // Since this is a flag, not a counter, just bail out if it's already
        // set; we shouldn't need concurrent snapshotting.
        if shared_state.wal_removal_on_hold {
            bail!("wal_removal_on_hold is already true");
        }
        shared_state.wal_removal_on_hold = true;

        // Drop shared_state to release the lock before calling wal_residence_guard().
        drop(shared_state);

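        // The residence guard is stored in the returned SnapshotContext: it keeps
        // the WAL files on disk while they are being streamed, and dropping the
        // context lifts the removal hold again.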
        let tli_copy = self.wal_residence_guard().await?;
        let bctx = SnapshotContext {
            from_segno,
            upto_segno,
            term,
            last_log_term,
            flush_lsn,
            wal_seg_size,
            tli: tli_copy,
        };

        Ok(bctx)
    }

    /// Finish snapshotting: check that the term(s) haven't changed.
    ///
    /// Note that the WAL gc hold-off is removed in Drop of SnapshotContext, so
    /// that it is not forgotten if snapshotting fails midway.
    pub async fn finish_snapshot(&self, bctx: &SnapshotContext) -> Result<()> {
        let shared_state = self.read_shared_state().await;
        let term = shared_state.sk.state().acceptor_state.term;
        let last_log_term = shared_state.sk.last_log_term();
        // There are some cases where this check could be relaxed (e.g. last_log_term
        // might change, but as long as the older history is strictly a part of the
        // new one, that's fine), but there is no need to do it.
        if bctx.term != term || bctx.last_log_term != last_log_term {
            bail!(
                "term(s) changed during snapshot: were term={}, last_log_term={}, now term={}, last_log_term={}",
                bctx.term, bctx.last_log_term, term, last_log_term
            );
        }
        Ok(())
    }
}

/// Response for debug dump request.
#[derive(Debug, Deserialize)]
pub struct DebugDumpResponse {
    pub start_time: DateTime<Utc>,
    pub finish_time: DateTime<Utc>,
    pub timelines: Vec<debug_dump::Timeline>,
    pub timelines_count: usize,
    pub config: debug_dump::Config,
}

/// Find the most advanced safekeeper and pull timeline from it.
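///
/// The flow, as implemented below:
/// 1) bail if the timeline already exists locally;
/// 2) query `timeline_status` on every host listed in the request;
/// 3) pick the donor with the greatest (epoch, flush_lsn, commit_lsn);
/// 4) stream its snapshot into a temporary directory and load the timeline.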
pub async fn handle_request(
    request: PullTimelineRequest,
    sk_auth_token: Option<SecretString>,
    global_timelines: Arc<GlobalTimelines>,
) -> Result<PullTimelineResponse> {
    let existing_tli = global_timelines.get(TenantTimelineId::new(
        request.tenant_id,
        request.timeline_id,
    ));
    if existing_tli.is_ok() {
        bail!("Timeline {} already exists", request.timeline_id);
    }

    let http_hosts = request.http_hosts.clone();

    // Figure out the statuses of potential donors.
    let responses: Vec<Result<TimelineStatus, mgmt_api::Error>> =
        futures::future::join_all(http_hosts.iter().map(|url| async {
            let cclient = Client::new(url.clone(), sk_auth_token.clone());
            let info = cclient
                .timeline_status(request.tenant_id, request.timeline_id)
                .await?;
            Ok(info)
        }))
        .await;

    let mut statuses = Vec::new();
    for (i, response) in responses.into_iter().enumerate() {
        let status = response.context(format!("fetching status from {}", http_hosts[i]))?;
        statuses.push((status, i));
    }

    // Find the most advanced safekeeper.
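    //
    // "Most advanced" compares (epoch, flush_lsn, commit_lsn) lexicographically:
    // a higher last log term (epoch) wins over a longer log. NB: the unwrap below
    // panics if the request listed no hosts.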
    let (status, i) = statuses
        .into_iter()
        .max_by_key(|(status, _)| {
            (
                status.acceptor_state.epoch,
                status.flush_lsn,
                status.commit_lsn,
            )
        })
        .unwrap();
    let safekeeper_host = http_hosts[i].clone();

    assert!(status.tenant_id == request.tenant_id);
    assert!(status.timeline_id == request.timeline_id);

    pull_timeline(status, safekeeper_host, sk_auth_token, global_timelines).await
}

async fn pull_timeline(
    status: TimelineStatus,
    host: String,
    sk_auth_token: Option<SecretString>,
    global_timelines: Arc<GlobalTimelines>,
) -> Result<PullTimelineResponse> {
    let ttid = TenantTimelineId::new(status.tenant_id, status.timeline_id);
    info!(
        "pulling timeline {} from safekeeper {}, commit_lsn={}, flush_lsn={}, term={}, epoch={}",
        ttid,
        host,
        status.commit_lsn,
        status.flush_lsn,
        status.acceptor_state.term,
        status.acceptor_state.epoch
    );

    let conf = &global_timelines.get_global_config();

    let (_tmp_dir, tli_dir_path) = create_temp_timeline_dir(conf, ttid).await?;

    let client = Client::new(host.clone(), sk_auth_token.clone());
    // Request a stream with the basebackup archive.
    let bb_resp = client
        .snapshot(status.tenant_id, status.timeline_id, conf.my_id)
        .await?;

    // Make a Stream of Bytes from it...
    let bb_stream = bb_resp.bytes_stream().map_err(std::io::Error::other);
    // ...and turn it into a StreamReader implementing AsyncRead.
    let bb_reader = tokio_util::io::StreamReader::new(bb_stream);

    // Extract it on the fly to the disk. We don't use the simple unpack() so
    // that we can fsync the files.
    let mut entries = Archive::new(bb_reader).entries()?;
    while let Some(base_tar_entry) = entries.next().await {
        let mut entry = base_tar_entry?;
        let header = entry.header();
        let file_path = header.path()?.into_owned();
        match header.entry_type() {
            tokio_tar::EntryType::Regular => {
                let utf8_file_path =
                    Utf8PathBuf::from_path_buf(file_path).expect("non-Unicode path");
                let dst_path = tli_dir_path.join(utf8_file_path);
                let mut f = OpenOptions::new()
                    .create(true)
                    .truncate(true)
                    .write(true)
                    .open(&dst_path)
                    .await?;
                tokio::io::copy(&mut entry, &mut f).await?;
                // fsync the file
                f.sync_all().await?;
            }
            _ => {
                bail!(
                    "entry {} in backup tar archive is of unexpected type: {:?}",
                    file_path.display(),
                    header.entry_type()
                );
            }
        }
    }

    // fsync the temp timeline directory to persist its contents.
    fsync_async_opt(&tli_dir_path, !conf.no_sync).await?;

    // Create the timeline from the temp directory and verify that it's correct.
    let (commit_lsn, flush_lsn) = validate_temp_timeline(conf, ttid, &tli_dir_path).await?;
    info!(
        "finished downloading timeline {}, commit_lsn={}, flush_lsn={}",
        ttid, commit_lsn, flush_lsn
    );
    assert!(status.commit_lsn <= status.flush_lsn);

    // Finally, load the timeline.
    let _tli = global_timelines
        .load_temp_timeline(ttid, &tli_dir_path, false)
        .await?;

    Ok(PullTimelineResponse {
        safekeeper_host: host,
    })
}