use std::cmp::min;
use std::io::{self, ErrorKind};
use std::sync::Arc;

use anyhow::{Context, Result, anyhow, bail};
use bytes::Bytes;
use camino::Utf8PathBuf;
use chrono::{DateTime, Utc};
use futures::{SinkExt, StreamExt, TryStreamExt};
use postgres_ffi::{PG_TLI, XLogFileName, XLogSegNo};
use reqwest::Certificate;
use safekeeper_api::Term;
use safekeeper_api::models::{PullTimelineRequest, PullTimelineResponse, TimelineStatus};
use safekeeper_client::mgmt_api;
use safekeeper_client::mgmt_api::Client;
use serde::Deserialize;
use tokio::fs::OpenOptions;
use tokio::io::AsyncWrite;
use tokio::sync::mpsc;
use tokio::task;
use tokio_tar::{Archive, Builder, Header};
use tokio_util::io::{CopyToBytes, SinkWriter};
use tokio_util::sync::PollSender;
use tracing::{error, info, instrument};
use utils::crashsafe::fsync_async_opt;
use utils::id::{NodeId, TenantTimelineId};
use utils::logging::SecretString;
use utils::lsn::Lsn;
use utils::pausable_failpoint;

use crate::control_file::CONTROL_FILE_NAME;
use crate::state::{EvictionState, TimelinePersistentState};
use crate::timeline::{Timeline, WalResidentTimeline};
use crate::timelines_global_map::{create_temp_timeline_dir, validate_temp_timeline};
use crate::wal_storage::open_wal_file;
use crate::{GlobalTimelines, debug_dump, wal_backup};

/// Stream tar archive of timeline to tx.
#[instrument(name = "snapshot", skip_all, fields(ttid = %tli.ttid))]
pub async fn stream_snapshot(
    tli: Arc<Timeline>,
    source: NodeId,
    destination: NodeId,
    tx: mpsc::Sender<Result<Bytes>>,
) {
    match tli.try_wal_residence_guard().await {
        Err(e) => {
            tx.send(Err(anyhow!("Error checking residence: {:#}", e)))
                .await
                .ok();
        }
        Ok(maybe_resident_tli) => {
            if let Err(e) = match maybe_resident_tli {
                Some(resident_tli) => {
                    stream_snapshot_resident_guts(resident_tli, source, destination, tx.clone())
                        .await
                }
                None => stream_snapshot_offloaded_guts(tli, source, destination, tx.clone()).await,
            } {
                // Error type/contents don't matter as they can't reach the client
                // (hyper likely doesn't do anything with them), but the http stream
                // will be prematurely terminated. It would be nice to try to send
                // the error in trailers though.
                tx.send(Err(anyhow!("snapshot failed"))).await.ok();
                error!("snapshot failed: {:#}", e);
            }
        }
    }
}

/// State needed while streaming the snapshot.
pub struct SnapshotContext {
    pub from_segno: XLogSegNo, // inclusive
    pub upto_segno: XLogSegNo, // inclusive
    pub term: Term,
    pub last_log_term: Term,
    pub flush_lsn: Lsn,
    pub wal_seg_size: usize,
    // used to remove the WAL hold-off in Drop.
    pub tli: WalResidentTimeline,
}

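// Drop cannot be async, yet lifting the WAL-removal hold requires the async
// shared-state lock, so the reset is done from a spawned task.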
impl Drop for SnapshotContext {
    fn drop(&mut self) {
        let tli = self.tli.clone();
        task::spawn(async move {
            let mut shared_state = tli.write_shared_state().await;
            shared_state.wal_removal_on_hold = false;
        });
    }
}

/// Build a tokio_tar stream that sends encoded bytes into a Bytes channel.
fn prepare_tar_stream(
    tx: mpsc::Sender<Result<Bytes>>,
) -> tokio_tar::Builder<impl AsyncWrite + Unpin + Send> {
    // tokio-tar wants a Write implementor, but we have an mpsc tx of Result<Bytes>;
    // use SinkWriter as a Write impl. That is,
    // - create a Sink from the tx. It returns PollSendError if the chan is closed.
    let sink = PollSender::new(tx);
    // - SinkWriter needs the sink error to be an io one, map it.
    let sink_io_err = sink.sink_map_err(|_| io::Error::from(ErrorKind::BrokenPipe));
    // - SinkWriter wants the sink type to be just Bytes, not Result<Bytes>, so map
    //   it with with(). Note that with() accepts an async function (which we don't
    //   need) and allows the map to fail (which we don't need either), hence the
    //   two Oks.
    let oksink = sink_io_err.with(|b: Bytes| async { io::Result::Ok(Result::Ok(b)) });
    // - SinkWriter (not surprisingly) wants a sink of &[u8], not Bytes, so wrap it
    //   into CopyToBytes. This is a data copy.
    let copy_to_bytes = CopyToBytes::new(oksink);
    let writer = SinkWriter::new(copy_to_bytes);
    let pinned_writer = Box::pin(writer);

    // Note that tokio_tar append_* funcs use tokio::io::copy with an 8KB buffer,
    // which is also likely suboptimal.
    Builder::new_non_terminated(pinned_writer)
}
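
// A minimal sketch of how this builder is typically driven (illustrative only;
// in this module the rx side is drained by the HTTP response body):
//
//     let (tx, mut rx) = mpsc::channel::<Result<Bytes>>(1);
//     let mut ar = prepare_tar_stream(tx);
//     // append entries, then ar.finish().await?;
//     while let Some(chunk) = rx.recv().await { /* forward chunk to the client */ }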

/// Implementation of snapshot for an offloaded timeline, only reads the control file
pub(crate) async fn stream_snapshot_offloaded_guts(
    tli: Arc<Timeline>,
    source: NodeId,
    destination: NodeId,
    tx: mpsc::Sender<Result<Bytes>>,
) -> Result<()> {
    let mut ar = prepare_tar_stream(tx);

    tli.snapshot_offloaded(&mut ar, source, destination).await?;

    ar.finish().await?;

    Ok(())
}

/// Implementation of snapshot for a timeline which is resident (includes some segment data)
pub async fn stream_snapshot_resident_guts(
    tli: WalResidentTimeline,
    source: NodeId,
    destination: NodeId,
    tx: mpsc::Sender<Result<Bytes>>,
) -> Result<()> {
    let mut ar = prepare_tar_stream(tx);

    let bctx = tli.start_snapshot(&mut ar, source, destination).await?;
    pausable_failpoint!("sk-snapshot-after-list-pausable");

    let tli_dir = tli.get_timeline_dir();
    info!(
        "sending {} segments [{:#X}-{:#X}], term={}, last_log_term={}, flush_lsn={}",
        bctx.upto_segno - bctx.from_segno + 1,
        bctx.from_segno,
        bctx.upto_segno,
        bctx.term,
        bctx.last_log_term,
        bctx.flush_lsn,
    );
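    // Stream the raw segment files. An in-progress segment is stored on disk
    // with a `.partial` suffix and is sent under that name as well.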
    for segno in bctx.from_segno..=bctx.upto_segno {
        let (mut sf, is_partial) = open_wal_file(&tli_dir, segno, bctx.wal_seg_size).await?;
        let mut wal_file_name = XLogFileName(PG_TLI, segno, bctx.wal_seg_size);
        if is_partial {
            wal_file_name.push_str(".partial");
        }
        ar.append_file(&wal_file_name, &mut sf).await?;
    }

    // Do the term check before ar.finish so that the archive ends up corrupted
    // in case of a term change. The client shouldn't ignore an abrupt stream
    // end anyway, but this is extra insurance.
    tli.finish_snapshot(&bctx).await?;

    ar.finish().await?;

    Ok(())
}

impl Timeline {
    /// Simple snapshot for an offloaded timeline: we will only upload a renamed partial segment and
    /// pass a modified control file into the provided tar stream (nothing with data segments on disk, since
    /// we are offloaded and there aren't any)
    async fn snapshot_offloaded<W: AsyncWrite + Unpin + Send>(
        self: &Arc<Timeline>,
        ar: &mut tokio_tar::Builder<W>,
        source: NodeId,
        destination: NodeId,
    ) -> Result<()> {
        // Take an initial copy of the control file, then release the state lock.
        let mut control_file = {
            let shared_state = self.write_shared_state().await;

            let control_file = TimelinePersistentState::clone(shared_state.sk.state());

            // Rare race: we got unevicted between entering the function and reading the control file.
            // We error out and let the API caller retry.
            if !matches!(control_file.eviction_state, EvictionState::Offloaded(_)) {
                bail!("Timeline was un-evicted during snapshot, please retry");
            }

            control_file
        };

        // Modify the partial segment of the in-memory copy of the control file to
        // point to the destination safekeeper.
        let replace = control_file
            .partial_backup
            .replace_uploaded_segment(source, destination)?;

        let Some(replace) = replace else {
            // In Manager::ready_for_eviction, we do not permit eviction unless the
            // timeline has a partial segment, so it is unexpected to find none here.
            anyhow::bail!("Timeline has no partial segment, cannot generate snapshot");
        };

        tracing::info!("Replacing uploaded partial segment in in-mem control file: {replace:?}");

        // Optimistically try to copy the partial segment to the destination's path: this
        // can fail if the timeline was un-evicted and modified in the background.
        let remote_timeline_path = &self.remote_path;
        wal_backup::copy_partial_segment(
            &replace.previous.remote_path(remote_timeline_path),
            &replace.current.remote_path(remote_timeline_path),
        )
        .await?;

        // Since the S3 copy succeeded with the path given in our control file snapshot, and
        // we are sending that snapshot in our response, we are giving the caller a consistent
        // snapshot even if our local Timeline was unevicted or otherwise modified in the meantime.
        let buf = control_file
            .write_to_buf()
            .with_context(|| "failed to serialize control store")?;
        let mut header = Header::new_gnu();
        header.set_size(buf.len().try_into().expect("never breaches u64"));
        ar.append_data(&mut header, CONTROL_FILE_NAME, buf.as_slice())
            .await
            .with_context(|| "failed to append to archive")?;

        Ok(())
    }
}

impl WalResidentTimeline {
    /// Start streaming tar archive with timeline:
    /// 1) stream control file under lock;
    /// 2) hold off WAL removal;
    /// 3) collect SnapshotContext to understand which WAL segments should be
    ///    streamed.
    ///
    /// Snapshot streams data up to flush_lsn. To make this safe, we must check
    /// that the term doesn't change during the procedure, or we risk sending a
    /// mix of WAL from different histories. The term is remembered in the
    /// SnapshotContext and checked in finish_snapshot. Note that in the last
    /// segment some WAL higher than the flush_lsn set here might be streamed;
    /// that's fine as long as the term doesn't change.
    ///
    /// Alternatively we could send only up to commit_lsn to get some valid
    /// state which later would be recovered by compute; in that case the term
    /// check is not needed, but we likely don't want that, as there might be
    /// no compute which could perform the recovery.
    ///
    /// When the returned SnapshotContext is dropped, the WAL hold is removed.
    async fn start_snapshot<W: AsyncWrite + Unpin + Send>(
        &self,
        ar: &mut tokio_tar::Builder<W>,
        source: NodeId,
        destination: NodeId,
    ) -> Result<SnapshotContext> {
        let mut shared_state = self.write_shared_state().await;
        let wal_seg_size = shared_state.get_wal_seg_size();

        let mut control_store = TimelinePersistentState::clone(shared_state.sk.state());
        // Modify the partial segment of the in-memory copy of the control file to
        // point to the destination safekeeper.
        let replace = control_store
            .partial_backup
            .replace_uploaded_segment(source, destination)?;

        if let Some(replace) = replace {
            // The deserialized control file has an uploaded partial. We upload a copy
            // of it to object storage for the destination safekeeper and send an updated
            // control file in the snapshot.
            tracing::info!(
                "Replacing uploaded partial segment in in-mem control file: {replace:?}"
            );

            let remote_timeline_path = &self.tli.remote_path;
            wal_backup::copy_partial_segment(
                &replace.previous.remote_path(remote_timeline_path),
                &replace.current.remote_path(remote_timeline_path),
            )
            .await?;
        }

        let buf = control_store
            .write_to_buf()
            .with_context(|| "failed to serialize control store")?;
        let mut header = Header::new_gnu();
        header.set_size(buf.len().try_into().expect("never breaches u64"));
        ar.append_data(&mut header, CONTROL_FILE_NAME, buf.as_slice())
            .await
            .with_context(|| "failed to append to archive")?;

        // We need to stream from the oldest segment that someone (s3 or pageserver)
        // still needs. This duplicates the calc_horizon_lsn logic.
        //
        // We know that WAL wasn't removed up to this point because it cannot be
        // removed further than `backup_lsn`. Since we're holding the shared_state
        // lock and are setting `wal_removal_on_hold` later, this guarantees that
        // WAL won't be removed until we're done.
        let from_lsn = min(
            shared_state.sk.state().remote_consistent_lsn,
            shared_state.sk.state().backup_lsn,
        );
        if from_lsn == Lsn::INVALID {
            // this is possible if snapshot is called before handling the first
            // elected message
            bail!("snapshot is called on uninitialized timeline");
        }
        let from_segno = from_lsn.segment_number(wal_seg_size);
        let term = shared_state.sk.state().acceptor_state.term;
        let last_log_term = shared_state.sk.last_log_term();
        let flush_lsn = shared_state.sk.flush_lsn();
        let upto_segno = flush_lsn.segment_number(wal_seg_size);
        // have some limit on the max number of segments as a sanity check
        const MAX_ALLOWED_SEGS: u64 = 1000;
        let num_segs = upto_segno - from_segno + 1;
        if num_segs > MAX_ALLOWED_SEGS {
            bail!(
                "snapshot is called on timeline with {} segments, but the limit is {}",
                num_segs,
                MAX_ALLOWED_SEGS
            );
        }

        // Prevent WAL removal while we're streaming data.
        //
        // Since this is a flag, not a counter, just bail out if it's already set;
        // we shouldn't need concurrent snapshotting.
        if shared_state.wal_removal_on_hold {
            bail!("wal_removal_on_hold is already true");
        }
        shared_state.wal_removal_on_hold = true;

        // Drop shared_state to release the lock before calling wal_residence_guard().
        drop(shared_state);

        let tli_copy = self.wal_residence_guard().await?;
        let bctx = SnapshotContext {
            from_segno,
            upto_segno,
            term,
            last_log_term,
            flush_lsn,
            wal_seg_size,
            tli: tli_copy,
        };

        Ok(bctx)
    }

    /// Finish snapshotting: check that the term(s) haven't changed.
    ///
    /// Note that the WAL gc hold-off is removed in Drop of SnapshotContext, so
    /// it isn't forgotten if snapshotting fails midway.
    pub async fn finish_snapshot(&self, bctx: &SnapshotContext) -> Result<()> {
        let shared_state = self.read_shared_state().await;
        let term = shared_state.sk.state().acceptor_state.term;
        let last_log_term = shared_state.sk.last_log_term();
        // There are some cases where this check could be relaxed (e.g. last_log_term
        // might change, but as long as the older history is strictly part of the new
        // one, that's fine), but there is no need to do so.
        if bctx.term != term || bctx.last_log_term != last_log_term {
            bail!(
                "term(s) changed during snapshot: were term={}, last_log_term={}, now term={}, last_log_term={}",
                bctx.term,
                bctx.last_log_term,
                term,
                last_log_term
            );
        }
        Ok(())
    }
}

/// Response for debug dump request.
#[derive(Debug, Deserialize)]
pub struct DebugDumpResponse {
    pub start_time: DateTime<Utc>,
    pub finish_time: DateTime<Utc>,
    pub timelines: Vec<debug_dump::Timeline>,
    pub timelines_count: usize,
    pub config: debug_dump::Config,
}

/// Find the most advanced safekeeper and pull the timeline from it.
pub async fn handle_request(
    request: PullTimelineRequest,
    sk_auth_token: Option<SecretString>,
    ssl_ca_cert: Option<Certificate>,
    global_timelines: Arc<GlobalTimelines>,
) -> Result<PullTimelineResponse> {
    let existing_tli = global_timelines.get(TenantTimelineId::new(
        request.tenant_id,
        request.timeline_id,
    ));
    if existing_tli.is_ok() {
        bail!("Timeline {} already exists", request.timeline_id);
    }

    let mut http_client = reqwest::Client::builder();
    if let Some(ssl_ca_cert) = ssl_ca_cert {
        http_client = http_client.add_root_certificate(ssl_ca_cert);
    }
    let http_client = http_client.build()?;

    let http_hosts = request.http_hosts.clone();

    // Figure out the statuses of potential donors.
    let responses: Vec<Result<TimelineStatus, mgmt_api::Error>> =
        futures::future::join_all(http_hosts.iter().map(|url| async {
            let cclient = Client::new(http_client.clone(), url.clone(), sk_auth_token.clone());
            let info = cclient
                .timeline_status(request.tenant_id, request.timeline_id)
                .await?;
            Ok(info)
        }))
        .await;

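    // Note that `?` below propagates the first donor error, so a single
    // unreachable host fails the whole pull request.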
    let mut statuses = Vec::new();
    for (i, response) in responses.into_iter().enumerate() {
        let status = response.context(format!("fetching status from {}", http_hosts[i]))?;
        statuses.push((status, i));
    }

    // Find the most advanced safekeeper
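    // Tuple comparison is lexicographic: the highest epoch wins first, then
    // flush_lsn, then commit_lsn. `unwrap` panics if `http_hosts` is empty,
    // since then there are no statuses to choose from.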
    let (status, i) = statuses
        .into_iter()
        .max_by_key(|(status, _)| {
            (
                status.acceptor_state.epoch,
                status.flush_lsn,
                status.commit_lsn,
            )
        })
        .unwrap();
    let safekeeper_host = http_hosts[i].clone();

    assert!(status.tenant_id == request.tenant_id);
    assert!(status.timeline_id == request.timeline_id);

    pull_timeline(
        status,
        safekeeper_host,
        sk_auth_token,
        http_client,
        global_timelines,
    )
    .await
}

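/// Pull a snapshot of the timeline from the chosen donor into a temporary
/// directory, validate it, and load it into the global timelines map.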
async fn pull_timeline(
    status: TimelineStatus,
    host: String,
    sk_auth_token: Option<SecretString>,
    http_client: reqwest::Client,
    global_timelines: Arc<GlobalTimelines>,
) -> Result<PullTimelineResponse> {
    let ttid = TenantTimelineId::new(status.tenant_id, status.timeline_id);
    info!(
        "pulling timeline {} from safekeeper {}, commit_lsn={}, flush_lsn={}, term={}, epoch={}",
        ttid,
        host,
        status.commit_lsn,
        status.flush_lsn,
        status.acceptor_state.term,
        status.acceptor_state.epoch
    );

    let conf = &global_timelines.get_global_config();

    let (_tmp_dir, tli_dir_path) = create_temp_timeline_dir(conf, ttid).await?;
    let client = Client::new(http_client, host.clone(), sk_auth_token.clone());
    // Request stream with the basebackup archive.
    let bb_resp = client
        .snapshot(status.tenant_id, status.timeline_id, conf.my_id)
        .await?;

    // Make a Stream of Bytes from it...
    let bb_stream = bb_resp.bytes_stream().map_err(std::io::Error::other);
    // ...and turn it into a StreamReader implementing AsyncRead.
    let bb_reader = tokio_util::io::StreamReader::new(bb_stream);

    // Extract it on the fly to disk. We don't use the simple unpack() because
    // we want to fsync the files.
    let mut entries = Archive::new(bb_reader).entries()?;
    while let Some(base_tar_entry) = entries.next().await {
        let mut entry = base_tar_entry?;
        let header = entry.header();
        let file_path = header.path()?.into_owned();
        match header.entry_type() {
            tokio_tar::EntryType::Regular => {
                let utf8_file_path =
                    Utf8PathBuf::from_path_buf(file_path).expect("non-Unicode path");
                let dst_path = tli_dir_path.join(utf8_file_path);
                let mut f = OpenOptions::new()
                    .create(true)
                    .truncate(true)
                    .write(true)
                    .open(&dst_path)
                    .await?;
                tokio::io::copy(&mut entry, &mut f).await?;
                // fsync the file
                f.sync_all().await?;
            }
            _ => {
                bail!(
                    "entry {} in backup tar archive is of unexpected type: {:?}",
                    file_path.display(),
                    header.entry_type()
                );
            }
        }
    }

    // fsync the temp timeline directory to remember its contents.
    fsync_async_opt(&tli_dir_path, !conf.no_sync).await?;

    // Let's create the timeline from the temp directory and verify that it's correct.
    let (commit_lsn, flush_lsn) = validate_temp_timeline(conf, ttid, &tli_dir_path).await?;
    info!(
        "finished downloading timeline {}, commit_lsn={}, flush_lsn={}",
        ttid, commit_lsn, flush_lsn
    );
    assert!(status.commit_lsn <= status.flush_lsn);

    // Finally, load the timeline.
    let _tli = global_timelines
        .load_temp_timeline(ttid, &tli_dir_path, false)
        .await?;

    Ok(PullTimelineResponse {
        safekeeper_host: host,
    })
}