use anyhow::{anyhow, bail, Context, Result};
use bytes::Bytes;
use camino::Utf8PathBuf;
use chrono::{DateTime, Utc};
use futures::{SinkExt, StreamExt, TryStreamExt};
use postgres_ffi::{XLogFileName, XLogSegNo, PG_TLI};
use serde::{Deserialize, Serialize};
use std::{
    cmp::min,
    io::{self, ErrorKind},
    sync::Arc,
};
use tokio::{fs::OpenOptions, io::AsyncWrite, sync::mpsc, task};
use tokio_tar::{Archive, Builder, Header};
use tokio_util::{
    io::{CopyToBytes, SinkWriter},
    sync::PollSender,
};
use tracing::{error, info, instrument};

use crate::{
    control_file::CONTROL_FILE_NAME,
    debug_dump,
    http::{
        client::{self, Client},
        routes::TimelineStatus,
    },
    safekeeper::Term,
    state::{EvictionState, TimelinePersistentState},
    timeline::{Timeline, WalResidentTimeline},
    timelines_global_map::{create_temp_timeline_dir, validate_temp_timeline},
    wal_backup,
    wal_storage::open_wal_file,
    GlobalTimelines,
};
use utils::{
    crashsafe::fsync_async_opt,
    id::{NodeId, TenantId, TenantTimelineId, TimelineId},
    logging::SecretString,
    lsn::Lsn,
    pausable_failpoint,
};

/// Stream tar archive of timeline to tx.
#[instrument(name = "snapshot", skip_all, fields(ttid = %tli.ttid))]
pub async fn stream_snapshot(
    tli: Arc<Timeline>,
    source: NodeId,
    destination: NodeId,
    tx: mpsc::Sender<Result<Bytes>>,
) {
    match tli.try_wal_residence_guard().await {
        Err(e) => {
            tx.send(Err(anyhow!("Error checking residence: {:#}", e)))
                .await
                .ok();
        }
        Ok(maybe_resident_tli) => {
            if let Err(e) = match maybe_resident_tli {
                Some(resident_tli) => {
                    stream_snapshot_resident_guts(resident_tli, source, destination, tx.clone())
                        .await
                }
                None => stream_snapshot_offloaded_guts(tli, source, destination, tx.clone()).await,
            } {
                // Error type/contents don't matter as they can't reach the client
                // (hyper likely doesn't do anything with them), but the http stream will be
                // prematurely terminated. It would be nice to try to send the error in
                // trailers though.
                tx.send(Err(anyhow!("snapshot failed"))).await.ok();
                error!("snapshot failed: {:#}", e);
            }
        }
    }
}
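
// A minimal usage sketch (illustrative; this is not the actual HTTP handler): the
// caller creates a bounded mpsc channel, spawns `stream_snapshot`, and forwards
// whatever arrives on the receiving end, e.g.:
//
//     let (tx, mut rx) = tokio::sync::mpsc::channel(1);
//     tokio::spawn(stream_snapshot(tli, source, destination, tx));
//     while let Some(chunk) = rx.recv().await {
//         let bytes: Bytes = chunk?; // each item is a Result<Bytes>
//         // ... forward `bytes` into the HTTP response body ...
//     }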

/// State needed while streaming the snapshot.
pub struct SnapshotContext {
    pub from_segno: XLogSegNo, // inclusive
    pub upto_segno: XLogSegNo, // inclusive
    pub term: Term,
    pub last_log_term: Term,
    pub flush_lsn: Lsn,
    pub wal_seg_size: usize,
    // used to remove the WAL hold-off in Drop.
    pub tli: WalResidentTimeline,
}

impl Drop for SnapshotContext {
    fn drop(&mut self) {
        // Drop is synchronous, so spawn a task that takes the async lock and
        // releases the WAL removal hold-off.
        let tli = self.tli.clone();
        task::spawn(async move {
            let mut shared_state = tli.write_shared_state().await;
            shared_state.wal_removal_on_hold = false;
        });
    }
}

/// Build a tokio_tar stream that sends encoded bytes into a Bytes channel.
fn prepare_tar_stream(
    tx: mpsc::Sender<Result<Bytes>>,
) -> tokio_tar::Builder<impl AsyncWrite + Unpin + Send> {
    // tokio-tar wants an AsyncWrite implementor, but we have an mpsc tx of Result<Bytes>;
    // use SinkWriter as the AsyncWrite impl. That is,
    // - create a Sink from the tx. It returns PollSendError if the channel is closed.
    let sink = PollSender::new(tx);
    // - SinkWriter needs the sink error to be an io error, so map it.
    let sink_io_err = sink.sink_map_err(|_| io::Error::from(ErrorKind::BrokenPipe));
    // - SinkWriter wants the sink item type to be just Bytes, not Result<Bytes>, so map
    //   it with with(). Note that with() accepts an async function, which we don't need,
    //   and allows the map to fail, which we don't need either; hence the two Oks.
    let oksink = sink_io_err.with(|b: Bytes| async { io::Result::Ok(Result::Ok(b)) });
    // - SinkWriter (not surprisingly) wants a sink of &[u8], not Bytes, so wrap
    //   it into CopyToBytes. This is a data copy.
    let copy_to_bytes = CopyToBytes::new(oksink);
    let writer = SinkWriter::new(copy_to_bytes);
    let pinned_writer = Box::pin(writer);

    // Note that tokio_tar append_* funcs use tokio::io::copy with an 8KB buffer,
    // which is also likely suboptimal.
    Builder::new_non_terminated(pinned_writer)
}
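
// The returned Builder is consumed by the snapshot functions below: they append the
// control file (and, for resident timelines, WAL segment files) and then call
// `finish()`; every byte written into the archive arrives on the channel as a
// `Bytes` chunk.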

/// Implementation of snapshot for an offloaded timeline, only reads control file
pub(crate) async fn stream_snapshot_offloaded_guts(
    tli: Arc<Timeline>,
    source: NodeId,
    destination: NodeId,
    tx: mpsc::Sender<Result<Bytes>>,
) -> Result<()> {
    let mut ar = prepare_tar_stream(tx);

    tli.snapshot_offloaded(&mut ar, source, destination).await?;

    ar.finish().await?;

    Ok(())
}

/// Implementation of snapshot for a timeline which is resident (includes some segment data)
pub async fn stream_snapshot_resident_guts(
    tli: WalResidentTimeline,
    source: NodeId,
    destination: NodeId,
    tx: mpsc::Sender<Result<Bytes>>,
) -> Result<()> {
    let mut ar = prepare_tar_stream(tx);

    let bctx = tli.start_snapshot(&mut ar, source, destination).await?;
    pausable_failpoint!("sk-snapshot-after-list-pausable");

    let tli_dir = tli.get_timeline_dir();
    info!(
        "sending {} segments [{:#X}-{:#X}], term={}, last_log_term={}, flush_lsn={}",
        bctx.upto_segno - bctx.from_segno + 1,
        bctx.from_segno,
        bctx.upto_segno,
        bctx.term,
        bctx.last_log_term,
        bctx.flush_lsn,
    );
    for segno in bctx.from_segno..=bctx.upto_segno {
        let (mut sf, is_partial) = open_wal_file(&tli_dir, segno, bctx.wal_seg_size).await?;
        let mut wal_file_name = XLogFileName(PG_TLI, segno, bctx.wal_seg_size);
        if is_partial {
            wal_file_name.push_str(".partial");
        }
        ar.append_file(&wal_file_name, &mut sf).await?;
    }

    // Do the term check before ar.finish so that the archive ends up corrupted if
    // the term has changed. The client shouldn't ignore an abrupt stream end anyway,
    // but this is an extra safeguard.
    tli.finish_snapshot(&bctx).await?;

    ar.finish().await?;

    Ok(())
}

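// Both snapshot flavors produce a tar archive containing the serialized control file
// (CONTROL_FILE_NAME); for resident timelines the archive also contains the WAL segment
// files in [from_segno, upto_segno], the last of which may carry a `.partial` suffix.
// `pull_timeline` below consumes exactly this layout.
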
impl Timeline {
    /// Simple snapshot for an offloaded timeline: we will only upload a renamed partial segment and
    /// pass a modified control file into the provided tar stream (no data segments are included,
    /// since the timeline is offloaded and there aren't any on disk)
    async fn snapshot_offloaded<W: AsyncWrite + Unpin + Send>(
        self: &Arc<Timeline>,
        ar: &mut tokio_tar::Builder<W>,
        source: NodeId,
        destination: NodeId,
    ) -> Result<()> {
        // Take an initial copy of the control file, then release the state lock.
        let mut control_file = {
            let shared_state = self.write_shared_state().await;

            let control_file = TimelinePersistentState::clone(shared_state.sk.state());

            // Rare race: we got unevicted between entering the function and reading the control file.
            // We error out and let the API caller retry.
            if !matches!(control_file.eviction_state, EvictionState::Offloaded(_)) {
                bail!("Timeline was un-evicted during snapshot, please retry");
            }

            control_file
        };

        // Modify the partial segment of the in-memory copy for the control file to
        // point to the destination safekeeper.
        let replace = control_file
            .partial_backup
            .replace_uploaded_segment(source, destination)?;

        let Some(replace) = replace else {
            // In Manager::ready_for_eviction, we do not permit eviction unless the timeline
            // has a partial segment. It is unexpected that one would be missing here.
            anyhow::bail!("Timeline has no partial segment, cannot generate snapshot");
        };

        tracing::info!("Replacing uploaded partial segment in in-mem control file: {replace:?}");

        // Optimistically try to copy the partial segment to the destination's path: this
        // can fail if the timeline was un-evicted and modified in the background.
        let remote_timeline_path = &self.remote_path;
        wal_backup::copy_partial_segment(
            &replace.previous.remote_path(remote_timeline_path),
            &replace.current.remote_path(remote_timeline_path),
        )
        .await?;

        // Since the S3 copy succeeded with the path given in our control file snapshot, and
        // we are sending that snapshot in our response, we are giving the caller a consistent
        // snapshot even if our local Timeline was unevicted or otherwise modified in the meantime.
        let buf = control_file
            .write_to_buf()
            .with_context(|| "failed to serialize control store")?;
        let mut header = Header::new_gnu();
        header.set_size(buf.len().try_into().expect("never breaches u64"));
        ar.append_data(&mut header, CONTROL_FILE_NAME, buf.as_slice())
            .await
            .with_context(|| "failed to append to archive")?;

        Ok(())
    }
}

impl WalResidentTimeline {
    /// Start streaming tar archive with timeline:
    /// 1) stream control file under lock;
    /// 2) hold off WAL removal;
    /// 3) collect SnapshotContext to understand which WAL segments should be
    ///    streamed.
    ///
    /// Snapshot streams data up to flush_lsn. To make this safe, we must check
    /// that the term doesn't change during the procedure, or we risk sending a mix
    /// of WAL from different histories. The term is remembered in the SnapshotContext
    /// and checked in finish_snapshot. Note that in the last segment some WAL
    /// higher than the flush_lsn set here might be streamed; that's fine as long as
    /// the term doesn't change.
    ///
    /// Alternatively we could send only up to commit_lsn to get some valid
    /// state which would later be recovered by compute; in that case the term check
    /// is not needed, but we likely don't want that as there might be no
    /// compute which could perform the recovery.
    ///
    /// When the returned SnapshotContext is dropped, the WAL hold-off is removed.
    async fn start_snapshot<W: AsyncWrite + Unpin + Send>(
        &self,
        ar: &mut tokio_tar::Builder<W>,
        source: NodeId,
        destination: NodeId,
    ) -> Result<SnapshotContext> {
        let mut shared_state = self.write_shared_state().await;
        let wal_seg_size = shared_state.get_wal_seg_size();

        let mut control_store = TimelinePersistentState::clone(shared_state.sk.state());
        // Modify the partial segment of the in-memory copy for the control file to
        // point to the destination safekeeper.
        let replace = control_store
            .partial_backup
            .replace_uploaded_segment(source, destination)?;

        if let Some(replace) = replace {
            // The deserialized control file has an uploaded partial. We upload a copy
            // of it to object storage for the destination safekeeper and send an updated
            // control file in the snapshot.
            tracing::info!(
                "Replacing uploaded partial segment in in-mem control file: {replace:?}"
            );

            let remote_timeline_path = &self.tli.remote_path;
            wal_backup::copy_partial_segment(
                &replace.previous.remote_path(remote_timeline_path),
                &replace.current.remote_path(remote_timeline_path),
            )
            .await?;
        }

        let buf = control_store
            .write_to_buf()
            .with_context(|| "failed to serialize control store")?;
        let mut header = Header::new_gnu();
        header.set_size(buf.len().try_into().expect("never breaches u64"));
        ar.append_data(&mut header, CONTROL_FILE_NAME, buf.as_slice())
            .await
            .with_context(|| "failed to append to archive")?;

        // We need to stream from the oldest segment that someone (s3 or pageserver)
        // still needs. This duplicates calc_horizon_lsn logic.
        //
        // We know that WAL wasn't removed up to this point because it cannot be
        // removed further than `backup_lsn`. Since we're holding the shared_state
        // lock and setting `wal_removal_on_hold` later, it is guaranteed that WAL
        // won't be removed until we're done.
        let from_lsn = min(
            shared_state.sk.state().remote_consistent_lsn,
            shared_state.sk.state().backup_lsn,
        );
        if from_lsn == Lsn::INVALID {
            // this is possible if snapshot is called before handling the first
            // elected message
            bail!("snapshot is called on uninitialized timeline");
        }
        let from_segno = from_lsn.segment_number(wal_seg_size);
        let term = shared_state.sk.state().acceptor_state.term;
        let last_log_term = shared_state.sk.last_log_term();
        let flush_lsn = shared_state.sk.flush_lsn();
        let upto_segno = flush_lsn.segment_number(wal_seg_size);
        // have some limit on the max number of segments as a sanity check
        const MAX_ALLOWED_SEGS: u64 = 1000;
        let num_segs = upto_segno - from_segno + 1;
        if num_segs > MAX_ALLOWED_SEGS {
            bail!(
                "snapshot is called on timeline with {} segments, but the limit is {}",
                num_segs,
                MAX_ALLOWED_SEGS
            );
        }

        // Prevent WAL removal while we're streaming data.
        //
        // Since this is a flag, not a counter, just bail out if it is already set; we
        // shouldn't need concurrent snapshotting.
        if shared_state.wal_removal_on_hold {
            bail!("wal_removal_on_hold is already true");
        }
        shared_state.wal_removal_on_hold = true;

        // Drop shared_state to release the lock, before calling wal_residence_guard().
        drop(shared_state);

        let tli_copy = self.wal_residence_guard().await?;
        let bctx = SnapshotContext {
            from_segno,
            upto_segno,
            term,
            last_log_term,
            flush_lsn,
            wal_seg_size,
            tli: tli_copy,
        };

        Ok(bctx)
    }

    /// Finish snapshotting: check that the term(s) haven't changed.
    ///
    /// Note that the WAL gc hold-off is removed in Drop of SnapshotContext so that it
    /// isn't forgotten if snapshotting fails midway.
    pub async fn finish_snapshot(&self, bctx: &SnapshotContext) -> Result<()> {
        let shared_state = self.read_shared_state().await;
        let term = shared_state.sk.state().acceptor_state.term;
        let last_log_term = shared_state.sk.last_log_term();
        // There are some cases where this check could be relaxed (e.g. last_log_term might
        // change, but as long as the older history is strictly a part of the new one that's
        // fine), but there is no need to do it.
        if bctx.term != term || bctx.last_log_term != last_log_term {
            bail!("term(s) changed during snapshot: were term={}, last_log_term={}, now term={}, last_log_term={}",
                bctx.term, bctx.last_log_term, term, last_log_term);
        }
        Ok(())
    }
}

/// pull_timeline request body.
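///
/// An illustrative JSON body (assumed shape; the id and host values below are
/// placeholders):
/// ```json
/// {
///   "tenant_id": "0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f",
///   "timeline_id": "1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a",
///   "http_hosts": ["http://safekeeper-1:7676", "http://safekeeper-2:7676"]
/// }
/// ```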
#[derive(Debug, Deserialize)]
pub struct Request {
    pub tenant_id: TenantId,
    pub timeline_id: TimelineId,
    pub http_hosts: Vec<String>,
}

#[derive(Debug, Serialize)]
pub struct Response {
    // Donor safekeeper host
    pub safekeeper_host: String,
    // TODO: add more fields?
}

/// Response for debug dump request.
#[derive(Debug, Deserialize)]
pub struct DebugDumpResponse {
    pub start_time: DateTime<Utc>,
    pub finish_time: DateTime<Utc>,
    pub timelines: Vec<debug_dump::Timeline>,
    pub timelines_count: usize,
    pub config: debug_dump::Config,
}

/// Find the most advanced safekeeper and pull timeline from it.
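///
/// Statuses are fetched from every host in `http_hosts`; a failure to fetch any status
/// fails the whole request. The donor is the safekeeper whose status maximizes
/// `(acceptor_state.epoch, flush_lsn, commit_lsn)`.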
pub async fn handle_request(
    request: Request,
    sk_auth_token: Option<SecretString>,
) -> Result<Response> {
    let existing_tli = GlobalTimelines::get(TenantTimelineId::new(
        request.tenant_id,
        request.timeline_id,
    ));
    if existing_tli.is_ok() {
        bail!("Timeline {} already exists", request.timeline_id);
    }

    let http_hosts = request.http_hosts.clone();

    // Figure out statuses of potential donors.
    let responses: Vec<Result<TimelineStatus, client::Error>> =
        futures::future::join_all(http_hosts.iter().map(|url| async {
            let cclient = Client::new(url.clone(), sk_auth_token.clone());
            let info = cclient
                .timeline_status(request.tenant_id, request.timeline_id)
                .await?;
            Ok(info)
        }))
        .await;

    let mut statuses = Vec::new();
    for (i, response) in responses.into_iter().enumerate() {
        let status = response.context(format!("fetching status from {}", http_hosts[i]))?;
        statuses.push((status, i));
    }

    // Find the most advanced safekeeper
    let (status, i) = statuses
        .into_iter()
        .max_by_key(|(status, _)| {
            (
                status.acceptor_state.epoch,
                status.flush_lsn,
                status.commit_lsn,
            )
        })
        .unwrap();
    let safekeeper_host = http_hosts[i].clone();

    assert!(status.tenant_id == request.tenant_id);
    assert!(status.timeline_id == request.timeline_id);

    pull_timeline(status, safekeeper_host, sk_auth_token).await
}

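/// Pull a timeline from the given donor safekeeper: stream its snapshot archive into a
/// temporary timeline directory, fsync the extracted files, validate the temp timeline,
/// and load it into the global timelines map.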
async fn pull_timeline(
    status: TimelineStatus,
    host: String,
    sk_auth_token: Option<SecretString>,
) -> Result<Response> {
    let ttid = TenantTimelineId::new(status.tenant_id, status.timeline_id);
    info!(
        "pulling timeline {} from safekeeper {}, commit_lsn={}, flush_lsn={}, term={}, epoch={}",
        ttid,
        host,
        status.commit_lsn,
        status.flush_lsn,
        status.acceptor_state.term,
        status.acceptor_state.epoch
    );

    let conf = &GlobalTimelines::get_global_config();

    let (_tmp_dir, tli_dir_path) = create_temp_timeline_dir(conf, ttid).await?;

    let client = Client::new(host.clone(), sk_auth_token.clone());
    // Request stream with basebackup archive.
    let bb_resp = client
        .snapshot(status.tenant_id, status.timeline_id, conf.my_id)
        .await?;

    // Make a Stream of Bytes from it...
    let bb_stream = bb_resp.bytes_stream().map_err(std::io::Error::other);
    // and turn it into a StreamReader implementing AsyncRead.
    let bb_reader = tokio_util::io::StreamReader::new(bb_stream);

    // Extract it on the fly to the disk. We don't use the simple unpack() because we
    // want to fsync each file.
    let mut entries = Archive::new(bb_reader).entries()?;
    while let Some(base_tar_entry) = entries.next().await {
        let mut entry = base_tar_entry?;
        let header = entry.header();
        let file_path = header.path()?.into_owned();
        match header.entry_type() {
            tokio_tar::EntryType::Regular => {
                let utf8_file_path =
                    Utf8PathBuf::from_path_buf(file_path).expect("non-Unicode path");
                let dst_path = tli_dir_path.join(utf8_file_path);
                let mut f = OpenOptions::new()
                    .create(true)
                    .truncate(true)
                    .write(true)
                    .open(&dst_path)
                    .await?;
                tokio::io::copy(&mut entry, &mut f).await?;
                // fsync the file
                f.sync_all().await?;
            }
            _ => {
                bail!(
                    "entry {} in backup tar archive is of unexpected type: {:?}",
                    file_path.display(),
                    header.entry_type()
                );
            }
        }
    }

    // fsync the temp timeline directory to persist its contents.
    fsync_async_opt(&tli_dir_path, !conf.no_sync).await?;

    // Create a timeline from the temp directory and verify that it's correct.
    let (commit_lsn, flush_lsn) = validate_temp_timeline(conf, ttid, &tli_dir_path).await?;
    info!(
        "finished downloading timeline {}, commit_lsn={}, flush_lsn={}",
        ttid, commit_lsn, flush_lsn
    );
    assert!(status.commit_lsn <= status.flush_lsn);

    // Finally, load the timeline.
    let _tli = GlobalTimelines::load_temp_timeline(ttid, &tli_dir_path, false).await?;

    Ok(Response {
        safekeeper_host: host,
    })
}