Line data Source code
1 : //! This module implements Timeline lifecycle management and has all necessary code
2 : //! to glue together SafeKeeper and all other background services.
3 :
4 : use anyhow::{anyhow, bail, Result};
5 : use camino::Utf8PathBuf;
6 : use postgres_ffi::XLogSegNo;
7 : use serde::{Deserialize, Serialize};
8 : use tokio::fs;
9 :
10 : use std::cmp::max;
11 : use std::sync::Arc;
12 : use std::time::Duration;
13 : use tokio::sync::{Mutex, MutexGuard};
14 : use tokio::{
15 : sync::{mpsc::Sender, watch},
16 : time::Instant,
17 : };
18 : use tracing::*;
19 : use utils::http::error::ApiError;
20 : use utils::{
21 : id::{NodeId, TenantTimelineId},
22 : lsn::Lsn,
23 : };
24 :
25 : use storage_broker::proto::SafekeeperTimelineInfo;
26 : use storage_broker::proto::TenantTimelineId as ProtoTenantTimelineId;
27 :
28 : use crate::receive_wal::WalReceivers;
29 : use crate::recovery::{recovery_main, Donor, RecoveryNeededInfo};
30 : use crate::safekeeper::{
31 : AcceptorProposerMessage, ProposerAcceptorMessage, SafeKeeper, ServerInfo, Term, TermLsn,
32 : INVALID_TERM,
33 : };
34 : use crate::send_wal::WalSenders;
35 : use crate::state::{TimelineMemState, TimelinePersistentState};
36 : use crate::wal_backup::{self};
37 : use crate::{control_file, safekeeper::UNKNOWN_SERVER_VERSION};
38 :
39 : use crate::metrics::FullTimelineInfo;
40 : use crate::wal_storage::Storage as wal_storage_iface;
41 : use crate::{debug_dump, wal_storage};
42 : use crate::{GlobalTimelines, SafeKeeperConf};
43 :
44 : /// Things safekeeper should know about timeline state on peers.
45 12237 : #[derive(Debug, Clone, Serialize, Deserialize)]
46 : pub struct PeerInfo {
47 : pub sk_id: NodeId,
48 : pub term: Term,
49 : /// Term of the last entry.
50 : pub last_log_term: Term,
51 : /// LSN of the last record.
52 : pub flush_lsn: Lsn,
53 : pub commit_lsn: Lsn,
54 : /// The LSN since which the safekeeper has WAL. TODO: remove this once we
55 : /// fill a new sk starting from backup_lsn.
56 : pub local_start_lsn: Lsn,
57 : /// When info was received. Serde annotations are not very useful but make
58 : /// the code compile -- we don't rely on this field externally.
59 : #[serde(skip)]
60 : #[serde(default = "Instant::now")]
61 : ts: Instant,
62 : pub pg_connstr: String,
63 : pub http_connstr: String,
64 : }
65 :
66 : impl PeerInfo {
67 10982 : fn from_sk_info(sk_info: &SafekeeperTimelineInfo, ts: Instant) -> PeerInfo {
68 10982 : PeerInfo {
69 10982 : sk_id: NodeId(sk_info.safekeeper_id),
70 10982 : term: sk_info.term,
71 10982 : last_log_term: sk_info.last_log_term,
72 10982 : flush_lsn: Lsn(sk_info.flush_lsn),
73 10982 : commit_lsn: Lsn(sk_info.commit_lsn),
74 10982 : local_start_lsn: Lsn(sk_info.local_start_lsn),
75 10982 : pg_connstr: sk_info.safekeeper_connstr.clone(),
76 10982 : http_connstr: sk_info.http_connstr.clone(),
77 10982 : ts,
78 10982 : }
79 10982 : }
80 : }
81 :
82 : // A vector-based node id -> peer state map with the very limited
83 : // functionality we need.
84 0 : #[derive(Debug, Clone, Default)]
85 : pub struct PeersInfo(pub Vec<PeerInfo>);
86 :
87 : impl PeersInfo {
88 10982 : fn get(&mut self, id: NodeId) -> Option<&mut PeerInfo> {
89 14610 : self.0.iter_mut().find(|p| p.sk_id == id)
90 10982 : }
91 :
92 10982 : fn upsert(&mut self, p: &PeerInfo) {
93 10982 : match self.get(p.sk_id) {
94 10121 : Some(rp) => *rp = p.clone(),
95 861 : None => self.0.push(p.clone()),
96 : }
97 10982 : }
98 : }
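
// A minimal usage sketch of the upsert semantics above (the peer values
// `peer_a`, `peer_a_newer`, `peer_b` are hypothetical): a second message from
// the same safekeeper replaces its previous entry instead of appending a
// duplicate.
//
//     let mut peers = PeersInfo::default();
//     peers.upsert(&peer_a);       // new sk_id: appended, len == 1
//     peers.upsert(&peer_a_newer); // same sk_id: entry replaced, len == 1
//     peers.upsert(&peer_b);       // another sk_id: appended, len == 2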
99 :
100 : /// Shared state associated with database instance
101 : pub struct SharedState {
102 : /// Safekeeper object
103 : sk: SafeKeeper<control_file::FileStorage, wal_storage::PhysicalStorage>,
104 : /// In-memory list containing the state of peers, as sent in their latest messages.
105 : peers_info: PeersInfo,
106 : /// True when WAL backup launcher oversees the timeline, making sure WAL is
107 : /// offloaded; this lets us bother the launcher less.
108 : wal_backup_active: bool,
109 : /// True whenever there is at least some pending activity on the timeline: a live
110 : /// compute connection, a pageserver that is not caught up (it must have the
111 : /// latest WAL for a new compute start), or unfinished WAL backup. In practice
112 : /// it means the safekeeper broadcasts info about the timeline to peers and old
113 : /// WAL is trimmed.
114 : ///
115 : /// TODO: it might be better to remove tli completely from GlobalTimelines
116 : /// when tli is inactive instead of having this flag.
117 : active: bool,
118 : last_removed_segno: XLogSegNo,
119 : }
120 :
121 : impl SharedState {
122 : /// Initialize fresh timeline state without persisting anything to disk.
123 479 : fn create_new(
124 479 : conf: &SafeKeeperConf,
125 479 : ttid: &TenantTimelineId,
126 479 : state: TimelinePersistentState,
127 479 : ) -> Result<Self> {
128 479 : if state.server.wal_seg_size == 0 {
129 0 : bail!(TimelineError::UninitializedWalSegSize(*ttid));
130 479 : }
131 479 :
132 479 : if state.server.pg_version == UNKNOWN_SERVER_VERSION {
133 0 : bail!(TimelineError::UninitializedPgVersion(*ttid));
134 479 : }
135 479 :
136 479 : if state.commit_lsn < state.local_start_lsn {
137 0 : bail!(
138 0 : "commit_lsn {} is higher than local_start_lsn {}",
139 0 : state.commit_lsn,
140 0 : state.local_start_lsn
141 0 : );
142 479 : }
143 479 :
144 479 : // We don't want to write anything to disk, because we may have an existing timeline there.
145 479 : // These functions should not change anything on disk.
146 479 : let timeline_dir = conf.timeline_dir(ttid);
147 479 : let control_store = control_file::FileStorage::create_new(timeline_dir, conf, state)?;
148 479 : let wal_store =
149 479 : wal_storage::PhysicalStorage::new(ttid, conf.timeline_dir(ttid), conf, &control_store)?;
150 479 : let sk = SafeKeeper::new(control_store, wal_store, conf.my_id)?;
151 :
152 479 : Ok(Self {
153 479 : sk,
154 479 : peers_info: PeersInfo(vec![]),
155 479 : wal_backup_active: false,
156 479 : active: false,
157 479 : last_removed_segno: 0,
158 479 : })
159 479 : }
160 :
161 : /// Restore SharedState from control file. If file doesn't exist, bails out.
162 137 : fn restore(conf: &SafeKeeperConf, ttid: &TenantTimelineId) -> Result<Self> {
163 137 : let control_store = control_file::FileStorage::restore_new(ttid, conf)?;
164 137 : if control_store.server.wal_seg_size == 0 {
165 0 : bail!(TimelineError::UninitializedWalSegSize(*ttid));
166 137 : }
167 :
168 137 : let wal_store =
169 137 : wal_storage::PhysicalStorage::new(ttid, conf.timeline_dir(ttid), conf, &control_store)?;
170 :
171 : Ok(Self {
172 137 : sk: SafeKeeper::new(control_store, wal_store, conf.my_id)?,
173 137 : peers_info: PeersInfo(vec![]),
174 : wal_backup_active: false,
175 : active: false,
176 : last_removed_segno: 0,
177 : })
178 137 : }
179 :
180 15166 : fn is_active(&self, num_computes: usize) -> bool {
181 15166 : self.is_wal_backup_required(num_computes)
182 : // FIXME: add tracking of relevant pageservers and check them here individually,
183 : // otherwise migration won't work (we suspend too early).
184 2361 : || self.sk.state.inmem.remote_consistent_lsn < self.sk.state.inmem.commit_lsn
185 15166 : }
186 :
187 : /// Mark timeline active/inactive and return whether s3 offloading requires
188 : /// start/stop action. If the timeline is deactivated, the control file is
189 : /// persisted here, since the maintenance task does that only for active timelines.
190 15166 : async fn update_status(&mut self, num_computes: usize, ttid: TenantTimelineId) -> bool {
191 15166 : let is_active = self.is_active(num_computes);
192 15166 : if self.active != is_active {
193 1545 : info!(
194 1545 : "timeline {} active={} now, remote_consistent_lsn={}, commit_lsn={}",
195 1545 : ttid,
196 1545 : is_active,
197 1545 : self.sk.state.inmem.remote_consistent_lsn,
198 1545 : self.sk.state.inmem.commit_lsn
199 1545 : );
200 1545 : if !is_active {
201 1494 : if let Err(e) = self.sk.state.flush().await {
202 0 : warn!("control file save in update_status failed: {:?}", e);
203 498 : }
204 1047 : }
205 13621 : }
206 15166 : self.active = is_active;
207 15166 : self.is_wal_backup_action_pending(num_computes)
208 15166 : }
209 :
210 : /// Should we run s3 offloading in current state?
211 42805 : fn is_wal_backup_required(&self, num_computes: usize) -> bool {
212 42805 : let seg_size = self.get_wal_seg_size();
213 42805 : num_computes > 0 ||
214 : // Currently only the whole segment is offloaded, so compare segment numbers.
215 11228 : (self.sk.state.inmem.commit_lsn.segment_number(seg_size) >
216 11228 : self.sk.state.inmem.backup_lsn.segment_number(seg_size))
217 42805 : }
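
// A minimal illustration of the segment comparison above, assuming the
// default 16 MiB WAL segment size (the LSN values are made up): backup lags
// a full segment behind commit, so offloading is required even with zero
// computes.
//
//     let seg_size: usize = 16 * 1024 * 1024;
//     let commit_lsn = Lsn(0x0200_0000); // inside segment 2
//     let backup_lsn = Lsn(0x0100_0000); // inside segment 1
//     assert!(commit_lsn.segment_number(seg_size) > backup_lsn.segment_number(seg_size));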
218 :
219 : /// Is the current state of s3 offloading not what it ought to be?
220 15166 : fn is_wal_backup_action_pending(&self, num_computes: usize) -> bool {
221 15166 : let res = self.wal_backup_active != self.is_wal_backup_required(num_computes);
222 15166 : if res {
223 12321 : let action_pending = if self.is_wal_backup_required(num_computes) {
224 12257 : "start"
225 : } else {
226 64 : "stop"
227 : };
228 12321 : trace!(
229 0 : "timeline {} s3 offloading action {} pending: num_computes={}, commit_lsn={}, backup_lsn={}",
230 0 : self.sk.state.timeline_id, action_pending, num_computes, self.sk.state.inmem.commit_lsn, self.sk.state.inmem.backup_lsn
231 0 : );
232 2845 : }
233 15166 : res
234 15166 : }
235 :
236 : /// Returns whether s3 offloading is required and sets current status as
237 : /// matching.
238 152 : fn wal_backup_attend(&mut self, num_computes: usize) -> bool {
239 152 : self.wal_backup_active = self.is_wal_backup_required(num_computes);
240 152 : self.wal_backup_active
241 152 : }
242 :
243 42814 : fn get_wal_seg_size(&self) -> usize {
244 42814 : self.sk.state.server.wal_seg_size as usize
245 42814 : }
246 :
247 8012 : fn get_safekeeper_info(
248 8012 : &self,
249 8012 : ttid: &TenantTimelineId,
250 8012 : conf: &SafeKeeperConf,
251 8012 : ) -> SafekeeperTimelineInfo {
252 8012 : SafekeeperTimelineInfo {
253 8012 : safekeeper_id: conf.my_id.0,
254 8012 : tenant_timeline_id: Some(ProtoTenantTimelineId {
255 8012 : tenant_id: ttid.tenant_id.as_ref().to_owned(),
256 8012 : timeline_id: ttid.timeline_id.as_ref().to_owned(),
257 8012 : }),
258 8012 : term: self.sk.state.acceptor_state.term,
259 8012 : last_log_term: self.sk.get_epoch(),
260 8012 : flush_lsn: self.sk.flush_lsn().0,
261 8012 : // note: this value is not flushed to control file yet and can be lost
262 8012 : commit_lsn: self.sk.state.inmem.commit_lsn.0,
263 8012 : remote_consistent_lsn: self.sk.state.inmem.remote_consistent_lsn.0,
264 8012 : peer_horizon_lsn: self.sk.state.inmem.peer_horizon_lsn.0,
265 8012 : safekeeper_connstr: conf
266 8012 : .advertise_pg_addr
267 8012 : .to_owned()
268 8012 : .unwrap_or(conf.listen_pg_addr.clone()),
269 8012 : http_connstr: conf.listen_http_addr.to_owned(),
270 8012 : backup_lsn: self.sk.state.inmem.backup_lsn.0,
271 8012 : local_start_lsn: self.sk.state.local_start_lsn.0,
272 8012 : availability_zone: conf.availability_zone.clone(),
273 8012 : standby_horizon: self.sk.state.inmem.standby_horizon.0,
274 8012 : }
275 8012 : }
276 :
277 : /// Get our latest view of the status of alive peers on the timeline.
278 : /// We pass our own info through the broker as well, so when we have no connection
279 : /// to the broker the returned vec is empty.
280 507 : fn get_peers(&self, heartbeat_timeout: Duration) -> Vec<PeerInfo> {
281 507 : let now = Instant::now();
282 507 : self.peers_info
283 507 : .0
284 507 : .iter()
285 507 : // Regard peer as absent if we haven't heard from it within heartbeat_timeout.
286 1286 : .filter(|p| now.duration_since(p.ts) <= heartbeat_timeout)
287 507 : .cloned()
288 507 : .collect()
289 507 : }
290 : }
291 :
292 1 : #[derive(Debug, thiserror::Error)]
293 : pub enum TimelineError {
294 : #[error("Timeline {0} was cancelled and cannot be used anymore")]
295 : Cancelled(TenantTimelineId),
296 : #[error("Timeline {0} was not found in global map")]
297 : NotFound(TenantTimelineId),
298 : #[error("Timeline {0} exists on disk, but wasn't loaded on startup")]
299 : Invalid(TenantTimelineId),
300 : #[error("Timeline {0} is already exists")]
301 : AlreadyExists(TenantTimelineId),
302 : #[error("Timeline {0} is not initialized, wal_seg_size is zero")]
303 : UninitializedWalSegSize(TenantTimelineId),
304 : #[error("Timeline {0} is not initialized, pg_version is unknown")]
305 : UninitializedPgVersion(TenantTimelineId),
306 : }
307 :
308 : // Convert to HTTP API error.
309 : impl From<TimelineError> for ApiError {
310 0 : fn from(te: TimelineError) -> ApiError {
311 0 : match te {
312 0 : TimelineError::NotFound(ttid) => {
313 0 : ApiError::NotFound(anyhow!("timeline {} not found", ttid).into())
314 : }
315 0 : _ => ApiError::InternalServerError(anyhow!("{}", te)),
316 : }
317 0 : }
318 : }
319 :
320 : /// Timeline struct manages lifecycle (creation, deletion, restore) of a safekeeper timeline.
321 : /// It also holds SharedState and provides mutually exclusive access to it.
322 : pub struct Timeline {
323 : pub ttid: TenantTimelineId,
324 :
325 : /// Sending here asks for wal backup launcher attention (start/stop
326 : /// offloading). Sending the ttid instead of a concrete command allows
327 : /// sending without holding the timeline lock.
328 : pub wal_backup_launcher_tx: Sender<TenantTimelineId>,
329 :
330 : /// Used to broadcast commit_lsn updates to all background jobs.
331 : commit_lsn_watch_tx: watch::Sender<Lsn>,
332 : commit_lsn_watch_rx: watch::Receiver<Lsn>,
333 :
334 : /// Broadcasts (current term, flush_lsn) updates, walsender is interested in
335 : /// them when sending in recovery mode (to walproposer or peers). Note: this
336 : /// is just a notification; WAL reading should always be done with the lock held, as
337 : /// term can change otherwise.
338 : term_flush_lsn_watch_tx: watch::Sender<TermLsn>,
339 : term_flush_lsn_watch_rx: watch::Receiver<TermLsn>,
340 :
341 : /// Safekeeper and other state that should remain consistent and
342 : /// synchronized with the disk. This is a tokio mutex, as we write WAL to disk
343 : /// while holding it, ensuring that consensus checks are in order.
344 : mutex: Mutex<SharedState>,
345 : walsenders: Arc<WalSenders>,
346 : walreceivers: Arc<WalReceivers>,
347 :
348 : /// Cancellation channel. Delete/cancel will send `true` here as a cancellation signal.
349 : cancellation_tx: watch::Sender<bool>,
350 :
351 : /// Timeline should not be used after cancellation. Background tasks should
352 : /// monitor this channel and stop eventually after receiving `true` from this channel.
353 : cancellation_rx: watch::Receiver<bool>,
354 :
355 : /// Directory where timeline state is stored.
356 : pub timeline_dir: Utf8PathBuf,
357 : }
358 :
359 : impl Timeline {
360 : /// Load existing timeline from disk.
361 137 : pub fn load_timeline(
362 137 : conf: &SafeKeeperConf,
363 137 : ttid: TenantTimelineId,
364 137 : wal_backup_launcher_tx: Sender<TenantTimelineId>,
365 137 : ) -> Result<Timeline> {
366 137 : let _enter = info_span!("load_timeline", timeline = %ttid.timeline_id).entered();
367 :
368 137 : let shared_state = SharedState::restore(conf, &ttid)?;
369 137 : let (commit_lsn_watch_tx, commit_lsn_watch_rx) =
370 137 : watch::channel(shared_state.sk.state.commit_lsn);
371 137 : let (term_flush_lsn_watch_tx, term_flush_lsn_watch_rx) = watch::channel(TermLsn::from((
372 137 : shared_state.sk.get_term(),
373 137 : shared_state.sk.flush_lsn(),
374 137 : )));
375 137 : let (cancellation_tx, cancellation_rx) = watch::channel(false);
376 137 :
377 137 : Ok(Timeline {
378 137 : ttid,
379 137 : wal_backup_launcher_tx,
380 137 : commit_lsn_watch_tx,
381 137 : commit_lsn_watch_rx,
382 137 : term_flush_lsn_watch_tx,
383 137 : term_flush_lsn_watch_rx,
384 137 : mutex: Mutex::new(shared_state),
385 137 : walsenders: WalSenders::new(),
386 137 : walreceivers: WalReceivers::new(),
387 137 : cancellation_rx,
388 137 : cancellation_tx,
389 137 : timeline_dir: conf.timeline_dir(&ttid),
390 137 : })
391 137 : }
392 :
393 : /// Create a new timeline, which is not yet persisted to disk.
394 479 : pub fn create_empty(
395 479 : conf: &SafeKeeperConf,
396 479 : ttid: TenantTimelineId,
397 479 : wal_backup_launcher_tx: Sender<TenantTimelineId>,
398 479 : server_info: ServerInfo,
399 479 : commit_lsn: Lsn,
400 479 : local_start_lsn: Lsn,
401 479 : ) -> Result<Timeline> {
402 479 : let (commit_lsn_watch_tx, commit_lsn_watch_rx) = watch::channel(Lsn::INVALID);
403 479 : let (term_flush_lsn_watch_tx, term_flush_lsn_watch_rx) =
404 479 : watch::channel(TermLsn::from((INVALID_TERM, Lsn::INVALID)));
405 479 : let (cancellation_tx, cancellation_rx) = watch::channel(false);
406 479 : let state =
407 479 : TimelinePersistentState::new(&ttid, server_info, vec![], commit_lsn, local_start_lsn);
408 479 :
409 479 : Ok(Timeline {
410 479 : ttid,
411 479 : wal_backup_launcher_tx,
412 479 : commit_lsn_watch_tx,
413 479 : commit_lsn_watch_rx,
414 479 : term_flush_lsn_watch_tx,
415 479 : term_flush_lsn_watch_rx,
416 479 : mutex: Mutex::new(SharedState::create_new(conf, &ttid, state)?),
417 479 : walsenders: WalSenders::new(),
418 479 : walreceivers: WalReceivers::new(),
419 479 : cancellation_rx,
420 479 : cancellation_tx,
421 479 : timeline_dir: conf.timeline_dir(&ttid),
422 : })
423 479 : }
424 :
425 : /// Initialize fresh timeline on disk and start background tasks. If init
426 : /// fails, timeline is cancelled and cannot be used anymore.
427 : ///
428 : /// Init is transactional, so if it fails, created files will be deleted,
429 : /// and state on disk should remain unchanged.
430 479 : pub async fn init_new(
431 479 : self: &Arc<Timeline>,
432 479 : shared_state: &mut MutexGuard<'_, SharedState>,
433 479 : conf: &SafeKeeperConf,
434 479 : ) -> Result<()> {
435 591 : match fs::metadata(&self.timeline_dir).await {
436 : Ok(_) => {
437 : // Timeline directory exists on disk, we should leave state unchanged
438 : // and return error.
439 0 : bail!(TimelineError::Invalid(self.ttid));
440 : }
441 479 : Err(e) if e.kind() == std::io::ErrorKind::NotFound => {}
442 0 : Err(e) => {
443 0 : return Err(e.into());
444 : }
445 : }
446 :
447 : // Create timeline directory.
448 515 : fs::create_dir_all(&self.timeline_dir).await?;
449 :
450 : // Write timeline to disk and start background tasks.
451 1553 : if let Err(e) = shared_state.sk.state.flush().await {
452 : // Bootstrap failed, cancel timeline and remove timeline directory.
453 0 : self.cancel(shared_state);
454 :
455 0 : if let Err(fs_err) = fs::remove_dir_all(&self.timeline_dir).await {
456 0 : warn!(
457 0 : "failed to remove timeline {} directory after bootstrap failure: {}",
458 0 : self.ttid, fs_err
459 0 : );
460 0 : }
461 :
462 0 : return Err(e);
463 479 : }
464 479 : self.bootstrap(conf);
465 479 : Ok(())
466 479 : }
467 :
468 : /// Bootstrap a new or existing timeline, starting background tasks.
469 616 : pub fn bootstrap(self: &Arc<Timeline>, conf: &SafeKeeperConf) {
470 616 : // Start recovery task which always runs on the timeline.
471 616 : if conf.peer_recovery_enabled {
472 1 : tokio::spawn(recovery_main(self.clone(), conf.clone()));
473 615 : }
474 616 : }
475 :
476 : /// Delete timeline from disk completely, by removing timeline directory.
477 : /// Background timeline activities will stop eventually.
478 : ///
479 : /// Also deletes WAL in s3. Might fail if e.g. s3 is unavailable, but
480 : /// deletion API endpoint is retriable.
481 28 : pub async fn delete(
482 28 : &self,
483 28 : shared_state: &mut MutexGuard<'_, SharedState>,
484 28 : only_local: bool,
485 28 : ) -> Result<(bool, bool)> {
486 28 : let was_active = shared_state.active;
487 28 : self.cancel(shared_state);
488 28 :
489 28 : // TODO: It's better to wait for s3 offloader termination before
490 28 : // removing data from s3. Though since s3 doesn't have transactions it
491 28 : // still wouldn't guarantee absence of data after removal.
492 28 : let conf = GlobalTimelines::get_global_config();
493 28 : if !only_local && conf.is_wal_backup_enabled() {
494 : // Note: we concurrently delete remote storage data from multiple
495 : // safekeepers. That's ok, s3 replies 200 if object doesn't exist and we
496 : // do some retries anyway.
497 18 : wal_backup::delete_timeline(&self.ttid).await?;
498 25 : }
499 28 : let dir_existed = delete_dir(&self.timeline_dir).await?;
500 28 : Ok((dir_existed, was_active))
501 28 : }
502 :
503 : /// Cancel timeline to prevent further usage. Background tasks will stop
504 : /// eventually after receiving cancellation signal.
505 : ///
506 : /// Note that we can't notify backup launcher here while holding
507 : /// shared_state lock, as this is a potential deadlock: caller is
508 : /// responsible for that. Generally we should probably make WAL backup tasks
509 : /// shut down on their own, checking once in a while whether it is
510 : /// time.
511 28 : fn cancel(&self, shared_state: &mut MutexGuard<'_, SharedState>) {
512 28 : info!("timeline {} is cancelled", self.ttid);
513 28 : let _ = self.cancellation_tx.send(true);
514 28 : // Close associated FDs. Nobody will be able to touch timeline data once
515 28 : // it is cancelled, so WAL storage won't be opened again.
516 28 : shared_state.sk.wal_store.close();
517 28 : }
518 :
519 : /// Returns whether the timeline is cancelled.
520 4311587 : pub fn is_cancelled(&self) -> bool {
521 4311587 : *self.cancellation_rx.borrow()
522 4311587 : }
523 :
524 : /// Returns a watch channel which gets a value when the timeline is cancelled.
525 : /// The currently observed value is guaranteed to be not-cancelled (errors otherwise).
526 1 : pub fn get_cancellation_rx(&self) -> Result<watch::Receiver<bool>> {
527 1 : let rx = self.cancellation_rx.clone();
528 1 : if *rx.borrow() {
529 0 : bail!(TimelineError::Cancelled(self.ttid));
530 1 : }
531 1 : Ok(rx)
532 1 : }
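
// A minimal sketch of how a background task is expected to consume this
// channel (the task body and `tli` binding are hypothetical): keep working
// until the observed value flips to `true`, then exit.
//
//     let mut cancellation_rx = tli.get_cancellation_rx()?;
//     tokio::spawn(async move {
//         while !*cancellation_rx.borrow() {
//             // changed() resolves once cancel() sends `true`, or errors
//             // if the Timeline (and thus the sender) is dropped.
//             if cancellation_rx.changed().await.is_err() {
//                 break;
//             }
//         }
//         // ... shut the task down ...
//     });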
533 :
534 : /// Take a mutually exclusive write lock on the timeline shared_state.
535 5110985 : pub async fn write_shared_state(&self) -> MutexGuard<SharedState> {
536 5110985 : self.mutex.lock().await
537 5110984 : }
538 :
539 15166 : async fn update_status(&self, shared_state: &mut SharedState) -> bool {
540 15166 : shared_state
541 15166 : .update_status(self.walreceivers.get_num(), self.ttid)
542 1494 : .await
543 15166 : }
544 :
545 : /// Update timeline status and kick wal backup launcher to stop/start offloading if needed.
546 4184 : pub async fn update_status_notify(&self) -> Result<()> {
547 4184 : if self.is_cancelled() {
548 0 : bail!(TimelineError::Cancelled(self.ttid));
549 4184 : }
550 4184 : let is_wal_backup_action_pending: bool = {
551 4184 : let mut shared_state = self.write_shared_state().await;
552 4184 : self.update_status(&mut shared_state).await
553 : };
554 4184 : if is_wal_backup_action_pending {
555 : // Can fail only if channel to a static thread got closed, which is not normal at all.
556 2560 : self.wal_backup_launcher_tx.send(self.ttid).await?;
557 1624 : }
558 4184 : Ok(())
559 4184 : }
560 :
561 : /// Returns true if walsender should stop sending WAL to pageserver. We
562 : /// terminate it if remote_consistent_lsn reached commit_lsn and there are no
563 : /// computes. While there might be nothing to stream already, we learn about
564 : /// remote_consistent_lsn updates through replication feedback, and we want
565 : /// to stop pushing to the broker once the pageserver is fully caught up.
566 3203 : pub async fn should_walsender_stop(&self, reported_remote_consistent_lsn: Lsn) -> bool {
567 3203 : if self.is_cancelled() {
568 0 : return true;
569 3203 : }
570 3203 : let shared_state = self.write_shared_state().await;
571 3203 : if self.walreceivers.get_num() == 0 {
572 907 : return shared_state.sk.state.inmem.commit_lsn == Lsn(0) || // no data at all yet
573 907 : reported_remote_consistent_lsn >= shared_state.sk.state.inmem.commit_lsn;
574 2296 : }
575 2296 : false
576 3203 : }
577 :
578 : /// Ensure that the current term is t, erroring otherwise, and lock the state.
579 2085 : pub async fn acquire_term(&self, t: Term) -> Result<MutexGuard<SharedState>> {
580 2085 : let ss = self.write_shared_state().await;
581 2085 : if ss.sk.state.acceptor_state.term != t {
582 1 : bail!(
583 1 : "failed to acquire term {}, current term {}",
584 1 : t,
585 1 : ss.sk.state.acceptor_state.term
586 1 : );
587 2084 : }
588 2084 : Ok(ss)
589 2085 : }
590 :
591 : /// Returns whether s3 offloading is required and sets current status as
592 : /// matching it.
593 152 : pub async fn wal_backup_attend(&self) -> bool {
594 152 : if self.is_cancelled() {
595 0 : return false;
596 152 : }
597 152 :
598 152 : self.write_shared_state()
599 34 : .await
600 152 : .wal_backup_attend(self.walreceivers.get_num())
601 152 : }
602 :
603 : /// Returns commit_lsn watch channel.
604 759 : pub fn get_commit_lsn_watch_rx(&self) -> watch::Receiver<Lsn> {
605 759 : self.commit_lsn_watch_rx.clone()
606 759 : }
607 :
608 : /// Returns term_flush_lsn watch channel.
609 18 : pub fn get_term_flush_lsn_watch_rx(&self) -> watch::Receiver<TermLsn> {
610 18 : self.term_flush_lsn_watch_rx.clone()
611 18 : }
612 :
613 : /// Pass arrived message to the safekeeper.
614 4268379 : pub async fn process_msg(
615 4268379 : &self,
616 4268379 : msg: &ProposerAcceptorMessage,
617 4268379 : ) -> Result<Option<AcceptorProposerMessage>> {
618 4268379 : if self.is_cancelled() {
619 0 : bail!(TimelineError::Cancelled(self.ttid));
620 4268379 : }
621 :
622 : let mut rmsg: Option<AcceptorProposerMessage>;
623 : let commit_lsn: Lsn;
624 : let term_flush_lsn: TermLsn;
625 : {
626 4268379 : let mut shared_state = self.write_shared_state().await;
627 4747923 : rmsg = shared_state.sk.process_msg(msg).await?;
628 :
629 : // if this is AppendResponse, fill in proper pageserver and hot
630 : // standby feedback.
631 4268375 : if let Some(AcceptorProposerMessage::AppendResponse(ref mut resp)) = rmsg {
632 1643317 : let (ps_feedback, standby_feedback) = self.walsenders.get_feedbacks();
633 1643317 : resp.hs_feedback = standby_feedback.hs_feedback;
634 1643317 : resp.pageserver_feedback = ps_feedback;
635 1643317 : shared_state.sk.state.inmem.standby_horizon = standby_feedback.reply.apply_lsn;
636 2625058 : }
637 :
638 4268375 : commit_lsn = shared_state.sk.state.inmem.commit_lsn;
639 4268375 : term_flush_lsn =
640 4268375 : TermLsn::from((shared_state.sk.get_term(), shared_state.sk.flush_lsn()));
641 4268375 : }
642 4268375 : self.commit_lsn_watch_tx.send(commit_lsn)?;
643 4268375 : self.term_flush_lsn_watch_tx.send(term_flush_lsn)?;
644 4268375 : Ok(rmsg)
645 4268377 : }
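
// A minimal sketch of the expected caller loop (assuming hypothetical
// `read_message()` / `send_reply()` helpers over the compute connection):
// feed each proposer message to the safekeeper and forward any reply.
//
//     while let Some(msg) = read_message().await? {
//         if let Some(reply) = tli.process_msg(&msg).await? {
//             send_reply(reply).await?;
//         }
//     }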
646 :
647 : /// Returns wal_seg_size.
648 9 : pub async fn get_wal_seg_size(&self) -> usize {
649 9 : self.write_shared_state().await.get_wal_seg_size()
650 9 : }
651 :
652 : /// Returns true only if the timeline is loaded and active.
653 10248 : pub async fn is_active(&self) -> bool {
654 10248 : if self.is_cancelled() {
655 0 : return false;
656 10248 : }
657 10248 :
658 10248 : self.write_shared_state().await.active
659 10248 : }
660 :
661 : /// Returns state of the timeline.
662 2843 : pub async fn get_state(&self) -> (TimelineMemState, TimelinePersistentState) {
663 2843 : let state = self.write_shared_state().await;
664 2843 : (state.sk.state.inmem.clone(), state.sk.state.clone())
665 2843 : }
666 :
667 : /// Returns latest backup_lsn.
668 269 : pub async fn get_wal_backup_lsn(&self) -> Lsn {
669 269 : self.write_shared_state().await.sk.state.inmem.backup_lsn
670 269 : }
671 :
672 : /// Sets backup_lsn to the given value.
673 12 : pub async fn set_wal_backup_lsn(&self, backup_lsn: Lsn) -> Result<()> {
674 12 : if self.is_cancelled() {
675 0 : bail!(TimelineError::Cancelled(self.ttid));
676 12 : }
677 :
678 12 : let mut state = self.write_shared_state().await;
679 12 : state.sk.state.inmem.backup_lsn = max(state.sk.state.inmem.backup_lsn, backup_lsn);
680 12 : // we should check whether to shut down offloader, but this will be done
681 12 : // soon by peer communication anyway.
682 12 : Ok(())
683 12 : }
684 :
685 : /// Get safekeeper info for broadcasting to broker and other peers.
686 8012 : pub async fn get_safekeeper_info(&self, conf: &SafeKeeperConf) -> SafekeeperTimelineInfo {
687 8012 : let shared_state = self.write_shared_state().await;
688 8012 : shared_state.get_safekeeper_info(&self.ttid, conf)
689 8012 : }
690 :
691 : /// Update timeline state with peer safekeeper data.
692 10982 : pub async fn record_safekeeper_info(&self, sk_info: SafekeeperTimelineInfo) -> Result<()> {
693 : let is_wal_backup_action_pending: bool;
694 : let commit_lsn: Lsn;
695 : {
696 10982 : let mut shared_state = self.write_shared_state().await;
697 10982 : shared_state.sk.record_safekeeper_info(&sk_info).await?;
698 10982 : let peer_info = PeerInfo::from_sk_info(&sk_info, Instant::now());
699 10982 : shared_state.peers_info.upsert(&peer_info);
700 10982 : is_wal_backup_action_pending = self.update_status(&mut shared_state).await;
701 10982 : commit_lsn = shared_state.sk.state.inmem.commit_lsn;
702 10982 : }
703 10982 : self.commit_lsn_watch_tx.send(commit_lsn)?;
704 : // Wake up wal backup launcher, if it is time to stop the offloading.
705 10982 : if is_wal_backup_action_pending {
706 9761 : self.wal_backup_launcher_tx.send(self.ttid).await?;
707 1221 : }
708 10982 : Ok(())
709 10982 : }
710 :
711 : /// Update the in-memory remote_consistent_lsn.
712 795424 : pub async fn update_remote_consistent_lsn(&self, candidate: Lsn) {
713 795424 : let mut shared_state = self.write_shared_state().await;
714 795423 : shared_state.sk.state.inmem.remote_consistent_lsn =
715 795423 : max(shared_state.sk.state.inmem.remote_consistent_lsn, candidate);
716 795423 : }
717 :
718 505 : pub async fn get_peers(&self, conf: &SafeKeeperConf) -> Vec<PeerInfo> {
719 505 : let shared_state = self.write_shared_state().await;
720 505 : shared_state.get_peers(conf.heartbeat_timeout)
721 505 : }
722 :
723 : /// Should we start fetching WAL from a peer safekeeper, and if yes, from
724 : /// which? Answer is yes, i.e. .donors is not empty if 1) there is something
725 : /// to fetch, and we can do that without running elections; 2) there is no
726 : /// actively streaming compute, as we don't want to compete with it.
727 : ///
728 : /// If donor(s) are chosen, their term is guaranteed to be equal
729 : /// to their last_log_term, so we are sure such a leader was ever elected.
730 : ///
731 : /// All possible donors are returned so that we could keep connection to the
732 : /// current one if it is good even if it slightly lags behind.
733 : ///
734 : /// Note that the term conditions above might not be met, but safekeepers are
735 : /// still not aligned on last flush_lsn. Generally in this case until
736 : /// elections are run it is not possible to say which safekeeper should
737 : /// recover from which one -- history which would be committed is different
738 : /// depending on assembled quorum (e.g. classic picture 8 from Raft paper).
739 : /// Thus we don't try to predict it here.
740 2 : pub async fn recovery_needed(&self, heartbeat_timeout: Duration) -> RecoveryNeededInfo {
741 2 : let ss = self.write_shared_state().await;
742 2 : let term = ss.sk.state.acceptor_state.term;
743 2 : let last_log_term = ss.sk.get_epoch();
744 2 : let flush_lsn = ss.sk.flush_lsn();
745 2 : // note that peers contain myself, but that's ok -- we are interested only in peers which are strictly ahead of us.
746 2 : let mut peers = ss.get_peers(heartbeat_timeout);
747 2 : // Sort by <last log term, lsn> pairs.
748 3 : peers.sort_by(|p1, p2| {
749 3 : let tl1 = TermLsn {
750 3 : term: p1.last_log_term,
751 3 : lsn: p1.flush_lsn,
752 3 : };
753 3 : let tl2 = TermLsn {
754 3 : term: p2.last_log_term,
755 3 : lsn: p2.flush_lsn,
756 3 : };
757 3 : tl2.cmp(&tl1) // desc
758 3 : });
759 2 : let num_streaming_computes = self.walreceivers.get_num_streaming();
760 2 : let donors = if num_streaming_computes > 0 {
761 0 : vec![] // If there is a streaming compute, don't try to recover to not intervene.
762 : } else {
763 2 : peers
764 2 : .iter()
765 3 : .filter_map(|candidate| {
766 3 : // Are we interested in this candidate?
767 3 : let candidate_tl = TermLsn {
768 3 : term: candidate.last_log_term,
769 3 : lsn: candidate.flush_lsn,
770 3 : };
771 3 : let my_tl = TermLsn {
772 3 : term: last_log_term,
773 3 : lsn: flush_lsn,
774 3 : };
775 3 : if my_tl < candidate_tl {
776 : // Yes, we are interested. Can we pull from it without
777 : // (re)running elections? It is possible if 1) his term
778 : // is equal to his last_log_term so we could act on
779 : // behalf of leader of this term (we must be sure he was
780 : // ever elected) and 2) our term is not higher, or we'll refuse data.
781 2 : if candidate.term == candidate.last_log_term && candidate.term >= term {
782 2 : Some(Donor::from(candidate))
783 : } else {
784 0 : None
785 : }
786 : } else {
787 1 : None
788 : }
789 3 : })
790 2 : .collect()
791 : };
792 2 : RecoveryNeededInfo {
793 2 : term,
794 2 : last_log_term,
795 2 : flush_lsn,
796 2 : peers,
797 2 : num_streaming_computes,
798 2 : donors,
799 2 : }
800 2 : }
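
// A worked example of the donor rule above (all numbers are illustrative):
// suppose we are at term=2, last_log_term=2, flush_lsn=0/100. A peer with
// (term=3, last_log_term=3, flush_lsn=0/200) is strictly ahead by
// (last_log_term, flush_lsn), its term equals its last_log_term (so a term-3
// leader was really elected), and 3 >= 2, so it becomes a donor. A peer with
// (term=4, last_log_term=3, flush_lsn=0/200) would not: its term differs from
// its last_log_term, so we cannot act on behalf of an elected leader for it.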
801 :
802 1020 : pub fn get_walsenders(&self) -> &Arc<WalSenders> {
803 1020 : &self.walsenders
804 1020 : }
805 :
806 2098 : pub fn get_walreceivers(&self) -> &Arc<WalReceivers> {
807 2098 : &self.walreceivers
808 2098 : }
809 :
810 : /// Returns flush_lsn.
811 535 : pub async fn get_flush_lsn(&self) -> Lsn {
812 535 : self.write_shared_state().await.sk.wal_store.flush_lsn()
813 535 : }
814 :
815 : /// Delete WAL segments from disk that are no longer needed. This is determined
816 : /// based on pageserver's remote_consistent_lsn and local backup_lsn/peer_lsn.
817 1773 : pub async fn remove_old_wal(&self, wal_backup_enabled: bool) -> Result<()> {
818 1773 : if self.is_cancelled() {
819 0 : bail!(TimelineError::Cancelled(self.ttid));
820 1773 : }
821 :
822 : let horizon_segno: XLogSegNo;
823 31 : let remover = {
824 1773 : let shared_state = self.write_shared_state().await;
825 1773 : horizon_segno = shared_state.sk.get_horizon_segno(wal_backup_enabled);
826 1773 : if horizon_segno <= 1 || horizon_segno <= shared_state.last_removed_segno {
827 1742 : return Ok(()); // nothing to do
828 31 : }
829 31 :
830 31 : // release the lock before removing
831 31 : shared_state.sk.wal_store.remove_up_to(horizon_segno - 1)
832 31 : };
833 31 :
834 31 : // delete old WAL files
835 45 : remover.await?;
836 :
837 : // update last_removed_segno
838 31 : let mut shared_state = self.write_shared_state().await;
839 31 : shared_state.last_removed_segno = horizon_segno;
840 31 : Ok(())
841 1773 : }
842 :
843 : /// Persist control file if there is something to save and enough time
844 : /// has passed since the last save. This helps to keep remote_consistent_lsn up
845 : /// to date so that a storage node restart doesn't cause many pageserver ->
846 : /// safekeeper reconnections.
847 1773 : pub async fn maybe_persist_control_file(&self) -> Result<()> {
848 1773 : self.write_shared_state()
849 44 : .await
850 : .sk
851 1773 : .maybe_persist_inmem_control_file()
852 0 : .await
853 1773 : }
854 :
855 : /// Gather timeline data for metrics. If the timeline is not active, returns
856 : /// None; we do not collect metrics for inactive timelines.
857 51 : pub async fn info_for_metrics(&self) -> Option<FullTimelineInfo> {
858 51 : if self.is_cancelled() {
859 0 : return None;
860 51 : }
861 51 :
862 51 : let ps_feedback = self.walsenders.get_ps_feedback();
863 51 : let state = self.write_shared_state().await;
864 51 : if state.active {
865 51 : Some(FullTimelineInfo {
866 51 : ttid: self.ttid,
867 51 : ps_feedback,
868 51 : wal_backup_active: state.wal_backup_active,
869 51 : timeline_is_active: state.active,
870 51 : num_computes: self.walreceivers.get_num() as u32,
871 51 : last_removed_segno: state.last_removed_segno,
872 51 : epoch_start_lsn: state.sk.epoch_start_lsn,
873 51 : mem_state: state.sk.state.inmem.clone(),
874 51 : persisted_state: state.sk.state.clone(),
875 51 : flush_lsn: state.sk.wal_store.flush_lsn(),
876 51 : wal_storage: state.sk.wal_store.get_metrics(),
877 51 : })
878 : } else {
879 0 : None
880 : }
881 51 : }
882 :
883 : /// Returns in-memory timeline state to build a full debug dump.
884 5 : pub async fn memory_dump(&self) -> debug_dump::Memory {
885 5 : let state = self.write_shared_state().await;
886 :
887 5 : let (write_lsn, write_record_lsn, flush_lsn, file_open) =
888 5 : state.sk.wal_store.internal_state();
889 5 :
890 5 : debug_dump::Memory {
891 5 : is_cancelled: self.is_cancelled(),
892 5 : peers_info_len: state.peers_info.0.len(),
893 5 : walsenders: self.walsenders.get_all(),
894 5 : wal_backup_active: state.wal_backup_active,
895 5 : active: state.active,
896 5 : num_computes: self.walreceivers.get_num() as u32,
897 5 : last_removed_segno: state.last_removed_segno,
898 5 : epoch_start_lsn: state.sk.epoch_start_lsn,
899 5 : mem_state: state.sk.state.inmem.clone(),
900 5 : write_lsn,
901 5 : write_record_lsn,
902 5 : flush_lsn,
903 5 : file_open,
904 5 : }
905 5 : }
906 :
907 : /// Apply a function to the control file state and persist it.
908 1 : pub async fn map_control_file<T>(
909 1 : &self,
910 1 : f: impl FnOnce(&mut TimelinePersistentState) -> Result<T>,
911 1 : ) -> Result<T> {
912 1 : let mut state = self.write_shared_state().await;
913 1 : let mut persistent_state = state.sk.state.start_change();
914 : // If f returns error, we abort the change and don't persist anything.
915 1 : let res = f(&mut persistent_state)?;
916 : // If persisting fails, we abort the change and return error.
917 3 : state.sk.state.finish_change(&persistent_state).await?;
918 1 : Ok(res)
919 1 : }
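
// Hedged usage sketch of the transactional update above (the field update and
// `new_commit_lsn` are illustrative): if the closure or the persist step
// fails, nothing is changed on disk.
//
//     tli.map_control_file(|state| {
//         state.commit_lsn = std::cmp::max(state.commit_lsn, new_commit_lsn);
//         Ok(())
//     })
//     .await?;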
920 : }
921 :
922 : /// Deletes directory and it's contents. Returns false if directory does not exist.
923 28 : async fn delete_dir(path: &Utf8PathBuf) -> Result<bool> {
924 28 : match fs::remove_dir_all(path).await {
925 14 : Ok(_) => Ok(true),
926 14 : Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(false),
927 0 : Err(e) => Err(e.into()),
928 : }
929 28 : }
|