Line data Source code
1 : //! This module implements Timeline lifecycle management and contains all the code
2 : //! needed to glue together SafeKeeper and the other background services.
3 :
4 : use std::cmp::max;
5 : use std::ops::{Deref, DerefMut};
6 : use std::sync::Arc;
7 : use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
8 : use std::time::Duration;
9 :
10 : use anyhow::{Result, anyhow, bail};
11 : use camino::{Utf8Path, Utf8PathBuf};
12 : use http_utils::error::ApiError;
13 : use remote_storage::RemotePath;
14 : use safekeeper_api::Term;
15 : use safekeeper_api::membership::Configuration;
16 : use safekeeper_api::models::{
17 : PeerInfo, TimelineMembershipSwitchResponse, TimelineTermBumpResponse,
18 : };
19 : use storage_broker::proto::{SafekeeperTimelineInfo, TenantTimelineId as ProtoTenantTimelineId};
20 : use tokio::fs::{self};
21 : use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard, watch};
22 : use tokio::time::Instant;
23 : use tokio_util::sync::CancellationToken;
24 : use tracing::*;
25 : use utils::id::{NodeId, TenantId, TenantTimelineId};
26 : use utils::lsn::Lsn;
27 : use utils::sync::gate::Gate;
28 :
29 : use crate::metrics::{FullTimelineInfo, MISC_OPERATION_SECONDS, WalStorageMetrics};
30 : use crate::rate_limit::RateLimiter;
31 : use crate::receive_wal::WalReceivers;
32 : use crate::safekeeper::{AcceptorProposerMessage, ProposerAcceptorMessage, SafeKeeper, TermLsn};
33 : use crate::send_wal::{WalSenders, WalSendersTimelineMetricValues};
34 : use crate::state::{EvictionState, TimelineMemState, TimelinePersistentState, TimelineState};
35 : use crate::timeline_guard::ResidenceGuard;
36 : use crate::timeline_manager::{AtomicStatus, ManagerCtl};
37 : use crate::timelines_set::TimelinesSet;
38 : use crate::wal_backup;
39 : use crate::wal_backup::{WalBackup, remote_timeline_path};
40 : use crate::wal_backup_partial::PartialRemoteSegment;
41 : use crate::wal_storage::{Storage as wal_storage_iface, WalReader};
42 : use crate::{SafeKeeperConf, control_file, debug_dump, timeline_manager, wal_storage};
43 :
44 0 : fn peer_info_from_sk_info(sk_info: &SafekeeperTimelineInfo, ts: Instant) -> PeerInfo {
45 0 : PeerInfo {
46 0 : sk_id: NodeId(sk_info.safekeeper_id),
47 0 : term: sk_info.term,
48 0 : last_log_term: sk_info.last_log_term,
49 0 : flush_lsn: Lsn(sk_info.flush_lsn),
50 0 : commit_lsn: Lsn(sk_info.commit_lsn),
51 0 : local_start_lsn: Lsn(sk_info.local_start_lsn),
52 0 : pg_connstr: sk_info.safekeeper_connstr.clone(),
53 0 : http_connstr: sk_info.http_connstr.clone(),
54 0 : https_connstr: sk_info.https_connstr.clone(),
55 0 : ts,
56 0 : }
57 0 : }
58 :
59 : // Vector-based node id -> peer state map with only the limited functionality
60 : // we need.
61 : #[derive(Debug, Clone, Default)]
62 : pub struct PeersInfo(pub Vec<PeerInfo>);
63 :
64 : impl PeersInfo {
65 0 : fn get(&mut self, id: NodeId) -> Option<&mut PeerInfo> {
66 0 : self.0.iter_mut().find(|p| p.sk_id == id)
67 0 : }
68 :
69 0 : fn upsert(&mut self, p: &PeerInfo) {
70 0 : match self.get(p.sk_id) {
71 0 : Some(rp) => *rp = p.clone(),
72 0 : None => self.0.push(p.clone()),
73 : }
74 0 : }
75 : }
76 :
77 : pub type ReadGuardSharedState<'a> = RwLockReadGuard<'a, SharedState>;
78 :
79 : /// WriteGuardSharedState is a wrapper around `RwLockWriteGuard<SharedState>` that
80 : /// automatically publishes the updated state to `watch::Sender` channels on drop.
81 : pub struct WriteGuardSharedState<'a> {
82 : tli: Arc<Timeline>,
83 : guard: RwLockWriteGuard<'a, SharedState>,
84 : }
85 :
86 : impl<'a> WriteGuardSharedState<'a> {
87 1254 : fn new(tli: Arc<Timeline>, guard: RwLockWriteGuard<'a, SharedState>) -> Self {
88 1254 : WriteGuardSharedState { tli, guard }
89 1254 : }
90 : }
91 :
92 : impl Deref for WriteGuardSharedState<'_> {
93 : type Target = SharedState;
94 :
95 0 : fn deref(&self) -> &Self::Target {
96 0 : &self.guard
97 0 : }
98 : }
99 :
100 : impl DerefMut for WriteGuardSharedState<'_> {
101 1249 : fn deref_mut(&mut self) -> &mut Self::Target {
102 1249 : &mut self.guard
103 1249 : }
104 : }
105 :
106 : impl Drop for WriteGuardSharedState<'_> {
107 1254 : fn drop(&mut self) {
108 1254 : let term_flush_lsn =
109 1254 : TermLsn::from((self.guard.sk.last_log_term(), self.guard.sk.flush_lsn()));
110 1254 : let commit_lsn = self.guard.sk.state().inmem.commit_lsn;
111 1254 :
112 1254 : let _ = self.tli.term_flush_lsn_watch_tx.send_if_modified(|old| {
113 1254 : if *old != term_flush_lsn {
114 620 : *old = term_flush_lsn;
115 620 : true
116 : } else {
117 634 : false
118 : }
119 1254 : });
120 1254 :
121 1254 : let _ = self.tli.commit_lsn_watch_tx.send_if_modified(|old| {
122 1254 : if *old != commit_lsn {
123 615 : *old = commit_lsn;
124 615 : true
125 : } else {
126 639 : false
127 : }
128 1254 : });
129 1254 :
130 1254 : // send notification about shared state update
131 1254 : self.tli.shared_state_version_tx.send_modify(|old| {
132 1254 : *old += 1;
133 1254 : });
134 1254 : }
135 : }
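As a usage sketch: because the guard publishes to the watch channels on drop, a subscriber only needs a receiver. The helper below is hypothetical; it assumes `get_commit_lsn_watch_rx` (defined later in this module) and tokio's `watch::Receiver::wait_for`.

```rust
// Hypothetical subscriber sketch: resolves once some writer's
// WriteGuardSharedState has been dropped with commit_lsn >= target.
async fn wait_for_commit(
    tli: std::sync::Arc<Timeline>,
    target: utils::lsn::Lsn,
) -> anyhow::Result<utils::lsn::Lsn> {
    let mut rx = tli.get_commit_lsn_watch_rx();
    // wait_for checks the current value first, then awaits further updates.
    let lsn = *rx.wait_for(|lsn| *lsn >= target).await?;
    Ok(lsn)
}
```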
136 :
137 : /// This structure is stored in shared state and represents the state of the timeline.
138 : ///
139 : /// Usually it holds SafeKeeper, but it also supports the offloaded timeline state. In that
140 : /// case, SafeKeeper is not available (because WAL is not present on disk) and all
141 : /// operations can be done only through the control file.
142 : #[allow(clippy::large_enum_variant, reason = "TODO")]
143 : pub enum StateSK {
144 : Loaded(SafeKeeper<control_file::FileStorage, wal_storage::PhysicalStorage>),
145 : Offloaded(Box<TimelineState<control_file::FileStorage>>),
146 : // Not used directly; required as a placeholder when moving between states.
147 : Empty,
148 : }
149 :
150 : impl StateSK {
151 2604 : pub fn flush_lsn(&self) -> Lsn {
152 2604 : match self {
153 2604 : StateSK::Loaded(sk) => sk.wal_store.flush_lsn(),
154 0 : StateSK::Offloaded(state) => match state.eviction_state {
155 0 : EvictionState::Offloaded(flush_lsn) => flush_lsn,
156 0 : _ => panic!("StateSK::Offloaded mismatches with eviction_state from control_file"),
157 : },
158 0 : StateSK::Empty => unreachable!(),
159 : }
160 2604 : }
161 :
162 : /// Get a reference to the control file's timeline state.
163 2637 : pub fn state(&self) -> &TimelineState<control_file::FileStorage> {
164 2637 : match self {
165 2637 : StateSK::Loaded(sk) => &sk.state,
166 0 : StateSK::Offloaded(s) => s,
167 0 : StateSK::Empty => unreachable!(),
168 : }
169 2637 : }
170 :
171 9 : pub fn state_mut(&mut self) -> &mut TimelineState<control_file::FileStorage> {
172 9 : match self {
173 9 : StateSK::Loaded(sk) => &mut sk.state,
174 0 : StateSK::Offloaded(s) => s,
175 0 : StateSK::Empty => unreachable!(),
176 : }
177 9 : }
178 :
179 1302 : pub fn last_log_term(&self) -> Term {
180 1302 : self.state()
181 1302 : .acceptor_state
182 1302 : .get_last_log_term(self.flush_lsn())
183 1302 : }
184 :
185 0 : pub async fn term_bump(&mut self, to: Option<Term>) -> Result<TimelineTermBumpResponse> {
186 0 : self.state_mut().term_bump(to).await
187 0 : }
188 :
189 0 : pub async fn membership_switch(
190 0 : &mut self,
191 0 : to: Configuration,
192 0 : ) -> Result<TimelineMembershipSwitchResponse> {
193 0 : self.state_mut().membership_switch(to).await
194 0 : }
195 :
196 : /// Close open WAL files to release FDs.
197 0 : fn close_wal_store(&mut self) {
198 0 : if let StateSK::Loaded(sk) = self {
199 0 : sk.wal_store.close();
200 0 : }
201 0 : }
202 :
203 : /// Update timeline state with peer safekeeper data.
204 0 : pub async fn record_safekeeper_info(&mut self, sk_info: &SafekeeperTimelineInfo) -> Result<()> {
205 0 : // update commit_lsn if safekeeper is loaded
206 0 : match self {
207 0 : StateSK::Loaded(sk) => sk.record_safekeeper_info(sk_info).await?,
208 0 : StateSK::Offloaded(_) => {}
209 0 : StateSK::Empty => unreachable!(),
210 : }
211 :
212 : // update everything else, including remote_consistent_lsn and backup_lsn
213 0 : let mut sync_control_file = false;
214 0 : let state = self.state_mut();
215 0 : let wal_seg_size = state.server.wal_seg_size as u64;
216 0 :
217 0 : state.inmem.backup_lsn = max(Lsn(sk_info.backup_lsn), state.inmem.backup_lsn);
218 0 : sync_control_file |= state.backup_lsn + wal_seg_size < state.inmem.backup_lsn;
219 0 :
220 0 : state.inmem.remote_consistent_lsn = max(
221 0 : Lsn(sk_info.remote_consistent_lsn),
222 0 : state.inmem.remote_consistent_lsn,
223 0 : );
224 0 : sync_control_file |=
225 0 : state.remote_consistent_lsn + wal_seg_size < state.inmem.remote_consistent_lsn;
226 0 :
227 0 : state.inmem.peer_horizon_lsn =
228 0 : max(Lsn(sk_info.peer_horizon_lsn), state.inmem.peer_horizon_lsn);
229 0 : sync_control_file |= state.peer_horizon_lsn + wal_seg_size < state.inmem.peer_horizon_lsn;
230 0 :
231 0 : if sync_control_file {
232 0 : state.flush().await?;
233 0 : }
234 0 : Ok(())
235 0 : }
236 :
237 : /// Previously known as epoch_start_lsn. Needed only for reference in some APIs.
238 0 : pub fn term_start_lsn(&self) -> Lsn {
239 0 : match self {
240 0 : StateSK::Loaded(sk) => sk.term_start_lsn,
241 0 : StateSK::Offloaded(_) => Lsn(0),
242 0 : StateSK::Empty => unreachable!(),
243 : }
244 0 : }
245 :
246 : /// Used for metrics only.
247 0 : pub fn wal_storage_metrics(&self) -> WalStorageMetrics {
248 0 : match self {
249 0 : StateSK::Loaded(sk) => sk.wal_store.get_metrics(),
250 0 : StateSK::Offloaded(_) => WalStorageMetrics::default(),
251 0 : StateSK::Empty => unreachable!(),
252 : }
253 0 : }
254 :
255 : /// Returns WAL storage internal LSNs for debug dump.
256 0 : pub fn wal_storage_internal_state(&self) -> (Lsn, Lsn, Lsn, bool) {
257 0 : match self {
258 0 : StateSK::Loaded(sk) => sk.wal_store.internal_state(),
259 : StateSK::Offloaded(_) => {
260 0 : let flush_lsn = self.flush_lsn();
261 0 : (flush_lsn, flush_lsn, flush_lsn, false)
262 : }
263 0 : StateSK::Empty => unreachable!(),
264 : }
265 0 : }
266 :
267 : /// Access to the SafeKeeper object. Panics if the timeline is offloaded; safe to use from WalResidentTimeline.
268 1240 : pub fn safekeeper(
269 1240 : &mut self,
270 1240 : ) -> &mut SafeKeeper<control_file::FileStorage, wal_storage::PhysicalStorage> {
271 1240 : match self {
272 1240 : StateSK::Loaded(sk) => sk,
273 : StateSK::Offloaded(_) => {
274 0 : panic!("safekeeper is offloaded, cannot be used")
275 : }
276 0 : StateSK::Empty => unreachable!(),
277 : }
278 1240 : }
279 :
280 : /// Moves control file's state structure out of the enum. Used to switch states.
281 0 : fn take_state(self) -> TimelineState<control_file::FileStorage> {
282 0 : match self {
283 0 : StateSK::Loaded(sk) => sk.state,
284 0 : StateSK::Offloaded(state) => *state,
285 0 : StateSK::Empty => unreachable!(),
286 : }
287 0 : }
288 : }
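The `Empty` variant above exists only so the enum can be moved out of behind a `&mut` reference. A minimal sketch of that swap pattern, mirroring what `switch_to_offloaded` does later in this file (the control-file update is elided):

```rust
// Sketch only: replace `sk` with Empty, consume the old variant, then put a
// new variant back, so the enum is never left uninhabited.
fn demote_to_offloaded(sk: &mut StateSK) {
    let prev = std::mem::replace(sk, StateSK::Empty);
    let cfile_state = prev.take_state(); // moves the control-file state out
    *sk = StateSK::Offloaded(Box::new(cfile_state));
}
```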
289 :
290 : /// Shared state associated with a database instance
291 : pub struct SharedState {
292 : /// Safekeeper object
293 : pub(crate) sk: StateSK,
294 : /// In-memory list of peer states, as reported in the latest messages from them.
295 : pub(crate) peers_info: PeersInfo,
296 : // A true value blocks old WAL removal; this is used by snapshotting. We
297 : // could make it a counter, but there is no need to.
298 : pub(crate) wal_removal_on_hold: bool,
299 : }
300 :
301 : impl SharedState {
302 : /// Creates a new SharedState.
303 5 : pub fn new(sk: StateSK) -> Self {
304 5 : Self {
305 5 : sk,
306 5 : peers_info: PeersInfo(vec![]),
307 5 : wal_removal_on_hold: false,
308 5 : }
309 5 : }
310 :
311 : /// Restore SharedState from the control file. If the file doesn't exist, bails out.
312 0 : pub fn restore(conf: &SafeKeeperConf, ttid: &TenantTimelineId) -> Result<Self> {
313 0 : let timeline_dir = get_timeline_dir(conf, ttid);
314 0 : let control_store = control_file::FileStorage::restore_new(&timeline_dir, conf.no_sync)?;
315 0 : if control_store.server.wal_seg_size == 0 {
316 0 : bail!(TimelineError::UninitializedWalSegSize(*ttid));
317 0 : }
318 :
319 0 : let sk = match control_store.eviction_state {
320 : EvictionState::Present => {
321 0 : let wal_store = wal_storage::PhysicalStorage::new(
322 0 : ttid,
323 0 : &timeline_dir,
324 0 : &control_store,
325 0 : conf.no_sync,
326 0 : )?;
327 0 : StateSK::Loaded(SafeKeeper::new(
328 0 : TimelineState::new(control_store),
329 0 : wal_store,
330 0 : conf.my_id,
331 0 : )?)
332 : }
333 : EvictionState::Offloaded(_) => {
334 0 : StateSK::Offloaded(Box::new(TimelineState::new(control_store)))
335 : }
336 : };
337 :
338 0 : Ok(Self::new(sk))
339 0 : }
340 :
341 5 : pub(crate) fn get_wal_seg_size(&self) -> usize {
342 5 : self.sk.state().server.wal_seg_size as usize
343 5 : }
344 :
345 0 : fn get_safekeeper_info(
346 0 : &self,
347 0 : ttid: &TenantTimelineId,
348 0 : conf: &SafeKeeperConf,
349 0 : standby_apply_lsn: Lsn,
350 0 : ) -> SafekeeperTimelineInfo {
351 0 : SafekeeperTimelineInfo {
352 0 : safekeeper_id: conf.my_id.0,
353 0 : tenant_timeline_id: Some(ProtoTenantTimelineId {
354 0 : tenant_id: ttid.tenant_id.as_ref().to_owned(),
355 0 : timeline_id: ttid.timeline_id.as_ref().to_owned(),
356 0 : }),
357 0 : term: self.sk.state().acceptor_state.term,
358 0 : last_log_term: self.sk.last_log_term(),
359 0 : flush_lsn: self.sk.flush_lsn().0,
360 0 : // note: this value is not flushed to control file yet and can be lost
361 0 : commit_lsn: self.sk.state().inmem.commit_lsn.0,
362 0 : remote_consistent_lsn: self.sk.state().inmem.remote_consistent_lsn.0,
363 0 : peer_horizon_lsn: self.sk.state().inmem.peer_horizon_lsn.0,
364 0 : safekeeper_connstr: conf
365 0 : .advertise_pg_addr
366 0 : .to_owned()
367 0 : .unwrap_or(conf.listen_pg_addr.clone()),
368 0 : http_connstr: conf.listen_http_addr.to_owned(),
369 0 : https_connstr: conf.listen_https_addr.to_owned(),
370 0 : backup_lsn: self.sk.state().inmem.backup_lsn.0,
371 0 : local_start_lsn: self.sk.state().local_start_lsn.0,
372 0 : availability_zone: conf.availability_zone.clone(),
373 0 : standby_horizon: standby_apply_lsn.0,
374 0 : }
375 0 : }
376 :
377 : /// Get our latest view of the status of alive peers on the timeline.
378 : /// We pass our own info through the broker as well, so when we don't have a connection
379 : /// to the broker the returned vec is empty.
380 43 : pub(crate) fn get_peers(&self, heartbeat_timeout: Duration) -> Vec<PeerInfo> {
381 43 : let now = Instant::now();
382 43 : self.peers_info
383 43 : .0
384 43 : .iter()
385 43 : // Regard peer as absent if we haven't heard from it within heartbeat_timeout.
386 43 : .filter(|p| now.duration_since(p.ts) <= heartbeat_timeout)
387 43 : .cloned()
388 43 : .collect()
389 43 : }
390 : }
391 :
392 : #[derive(Debug, thiserror::Error)]
393 : pub enum TimelineError {
394 : #[error("Timeline {0} was cancelled and cannot be used anymore")]
395 : Cancelled(TenantTimelineId),
396 : #[error("Timeline {0} was not found in global map")]
397 : NotFound(TenantTimelineId),
398 : #[error("Timeline {0} has been deleted")]
399 : Deleted(TenantTimelineId),
400 : #[error("Timeline {0} creation is in progress")]
401 : CreationInProgress(TenantTimelineId),
402 : #[error("Timeline {0} exists on disk, but wasn't loaded on startup")]
403 : Invalid(TenantTimelineId),
404 : #[error("Timeline {0} is already exists")]
405 : AlreadyExists(TenantTimelineId),
406 : #[error("Timeline {0} is not initialized, wal_seg_size is zero")]
407 : UninitializedWalSegSize(TenantTimelineId),
408 : #[error("Timeline {0} is not initialized, pg_version is unknown")]
409 : UninitialinzedPgVersion(TenantTimelineId),
410 : }
411 :
412 : // Convert to HTTP API error.
413 : impl From<TimelineError> for ApiError {
414 0 : fn from(te: TimelineError) -> ApiError {
415 0 : match te {
416 0 : TimelineError::NotFound(ttid) => {
417 0 : ApiError::NotFound(anyhow!("timeline {} not found", ttid).into())
418 : }
419 0 : _ => ApiError::InternalServerError(anyhow!("{}", te)),
420 : }
421 0 : }
422 : }
423 :
424 : /// We run remote deletion in a background task, this is how it sends its results back.
425 : type RemoteDeletionReceiver = tokio::sync::watch::Receiver<Option<anyhow::Result<()>>>;
426 :
427 : /// Timeline struct manages lifecycle (creation, deletion, restore) of a safekeeper timeline.
428 : /// It also holds SharedState and provides mutually exclusive access to it.
429 : pub struct Timeline {
430 : pub ttid: TenantTimelineId,
431 : pub remote_path: RemotePath,
432 :
433 : /// Used to broadcast commit_lsn updates to all background jobs.
434 : commit_lsn_watch_tx: watch::Sender<Lsn>,
435 : commit_lsn_watch_rx: watch::Receiver<Lsn>,
436 :
437 : /// Broadcasts (current term, flush_lsn) updates; walsender is interested in
438 : /// them when sending in recovery mode (to walproposer or peers). Note: this
439 : /// is just a notification, WAL reading should always be done with the lock held,
440 : /// as the term can change otherwise.
441 : term_flush_lsn_watch_tx: watch::Sender<TermLsn>,
442 : term_flush_lsn_watch_rx: watch::Receiver<TermLsn>,
443 :
444 : /// Broadcasts shared state updates.
445 : shared_state_version_tx: watch::Sender<usize>,
446 : shared_state_version_rx: watch::Receiver<usize>,
447 :
448 : /// Safekeeper and other state that should remain consistent and
449 : /// synchronized with the disk. This is a tokio mutex, as we write WAL to disk
450 : /// while holding it, ensuring that consensus checks are in order.
451 : mutex: RwLock<SharedState>,
452 : walsenders: Arc<WalSenders>,
453 : walreceivers: Arc<WalReceivers>,
454 : timeline_dir: Utf8PathBuf,
455 : manager_ctl: ManagerCtl,
456 : conf: Arc<SafeKeeperConf>,
457 :
458 : pub(crate) wal_backup: Arc<WalBackup>,
459 :
460 : remote_deletion: std::sync::Mutex<Option<RemoteDeletionReceiver>>,
461 :
462 : /// Hold this gate from code that depends on the Timeline's non-shut-down state. While holding
463 : /// this gate, you must respect [`Timeline::cancel`].
464 : pub(crate) gate: Gate,
465 :
466 : /// Delete/cancel will trigger this; background tasks should drop out as soon as it fires.
467 : pub(crate) cancel: CancellationToken,
468 :
469 : // timeline_manager controlled state
470 : pub(crate) broker_active: AtomicBool,
471 : pub(crate) wal_backup_active: AtomicBool,
472 : pub(crate) last_removed_segno: AtomicU64,
473 : pub(crate) mgr_status: AtomicStatus,
474 : }
475 :
476 : impl Timeline {
477 : /// Constructs a new timeline.
478 5 : pub fn new(
479 5 : ttid: TenantTimelineId,
480 5 : timeline_dir: &Utf8Path,
481 5 : remote_path: &RemotePath,
482 5 : shared_state: SharedState,
483 5 : conf: Arc<SafeKeeperConf>,
484 5 : wal_backup: Arc<WalBackup>,
485 5 : ) -> Arc<Self> {
486 5 : let (commit_lsn_watch_tx, commit_lsn_watch_rx) =
487 5 : watch::channel(shared_state.sk.state().commit_lsn);
488 5 : let (term_flush_lsn_watch_tx, term_flush_lsn_watch_rx) = watch::channel(TermLsn::from((
489 5 : shared_state.sk.last_log_term(),
490 5 : shared_state.sk.flush_lsn(),
491 5 : )));
492 5 : let (shared_state_version_tx, shared_state_version_rx) = watch::channel(0);
493 5 :
494 5 : let walreceivers = WalReceivers::new();
495 5 :
496 5 : Arc::new(Self {
497 5 : ttid,
498 5 : remote_path: remote_path.to_owned(),
499 5 : timeline_dir: timeline_dir.to_owned(),
500 5 : commit_lsn_watch_tx,
501 5 : commit_lsn_watch_rx,
502 5 : term_flush_lsn_watch_tx,
503 5 : term_flush_lsn_watch_rx,
504 5 : shared_state_version_tx,
505 5 : shared_state_version_rx,
506 5 : mutex: RwLock::new(shared_state),
507 5 : walsenders: WalSenders::new(walreceivers.clone()),
508 5 : walreceivers,
509 5 : gate: Default::default(),
510 5 : cancel: CancellationToken::default(),
511 5 : remote_deletion: std::sync::Mutex::new(None),
512 5 : manager_ctl: ManagerCtl::new(),
513 5 : conf,
514 5 : broker_active: AtomicBool::new(false),
515 5 : wal_backup_active: AtomicBool::new(false),
516 5 : last_removed_segno: AtomicU64::new(0),
517 5 : mgr_status: AtomicStatus::new(),
518 5 : wal_backup,
519 5 : })
520 5 : }
521 :
522 : /// Load existing timeline from disk.
523 0 : pub fn load_timeline(
524 0 : conf: Arc<SafeKeeperConf>,
525 0 : ttid: TenantTimelineId,
526 0 : wal_backup: Arc<WalBackup>,
527 0 : ) -> Result<Arc<Timeline>> {
528 0 : let _enter = info_span!("load_timeline", timeline = %ttid.timeline_id).entered();
529 :
530 0 : let shared_state = SharedState::restore(conf.as_ref(), &ttid)?;
531 0 : let timeline_dir = get_timeline_dir(conf.as_ref(), &ttid);
532 0 : let remote_path = remote_timeline_path(&ttid)?;
533 :
534 0 : Ok(Timeline::new(
535 0 : ttid,
536 0 : &timeline_dir,
537 0 : &remote_path,
538 0 : shared_state,
539 0 : conf,
540 0 : wal_backup,
541 0 : ))
542 0 : }
543 :
544 : /// Bootstrap a new or existing timeline, starting its background tasks.
545 5 : pub fn bootstrap(
546 5 : self: &Arc<Timeline>,
547 5 : _shared_state: &mut WriteGuardSharedState<'_>,
548 5 : conf: &SafeKeeperConf,
549 5 : broker_active_set: Arc<TimelinesSet>,
550 5 : partial_backup_rate_limiter: RateLimiter,
551 5 : wal_backup: Arc<WalBackup>,
552 5 : ) {
553 5 : let (tx, rx) = self.manager_ctl.bootstrap_manager();
554 :
555 5 : let Ok(gate_guard) = self.gate.enter() else {
556 : // Init raced with shutdown
557 0 : return;
558 : };
559 :
560 : // Start manager task which will monitor timeline state and update
561 : // background tasks.
562 5 : tokio::spawn({
563 5 : let this = self.clone();
564 5 : let conf = conf.clone();
565 5 : async move {
566 5 : let _gate_guard = gate_guard;
567 5 : timeline_manager::main_task(
568 5 : ManagerTimeline { tli: this },
569 5 : conf,
570 5 : broker_active_set,
571 5 : tx,
572 5 : rx,
573 5 : partial_backup_rate_limiter,
574 5 : wal_backup,
575 5 : )
576 5 : .await
577 5 : }
578 5 : });
579 5 : }
580 :
581 : /// Cancel the timeline, requesting background activity to stop. Closing
582 : /// the `self.gate` waits for that.
583 0 : pub async fn cancel(&self) {
584 0 : info!("timeline {} shutting down", self.ttid);
585 0 : self.cancel.cancel();
586 0 : }
587 :
588 : /// Background timeline activities (which hold Timeline::gate) will no
589 : /// longer run once this function completes. `Self::cancel` must have been
590 : /// already called.
591 0 : pub async fn close(&self) {
592 0 : assert!(self.cancel.is_cancelled());
593 :
594 : // Wait for any concurrent tasks to stop using this timeline, to avoid e.g. attempts
595 : // to read deleted files.
596 0 : self.gate.close().await;
597 0 : }
598 :
599 : /// Delete timeline from disk completely, by removing timeline directory.
600 : ///
601 : /// Also deletes WAL in s3. Might fail if e.g. s3 is unavailable, but
602 : /// deletion API endpoint is retriable.
603 : ///
604 : /// Timeline must be in shut-down state (i.e. call [`Self::close`] first)
605 0 : pub async fn delete(
606 0 : &self,
607 0 : shared_state: &mut WriteGuardSharedState<'_>,
608 0 : only_local: bool,
609 0 : ) -> Result<bool> {
610 0 : // Assert that [`Self::close`] was already called
611 0 : assert!(self.cancel.is_cancelled());
612 0 : assert!(self.gate.close_complete());
613 :
614 0 : info!("deleting timeline {} from disk", self.ttid);
615 :
616 : // Close associated FDs. Nobody will be able to touch timeline data once
617 : // it is cancelled, so WAL storage won't be opened again.
618 0 : shared_state.sk.close_wal_store();
619 0 :
620 0 : if !only_local {
621 0 : self.remote_delete().await?;
622 0 : }
623 :
624 0 : let dir_existed = delete_dir(&self.timeline_dir).await?;
625 0 : Ok(dir_existed)
626 0 : }
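Putting the lifecycle invariants together: `delete` asserts that the timeline was cancelled and its gate closed, so a caller is expected to run the sequence below. The helper is hypothetical, not an API of this module.

```rust
// Hypothetical teardown sequence honoring the asserts in delete().
async fn shutdown_and_delete(tli: &std::sync::Arc<Timeline>) -> anyhow::Result<bool> {
    tli.cancel().await; // request background activity to stop
    tli.close().await; // wait for all gate holders to drain
    let mut shared = tli.write_shared_state().await;
    // only_local = false: also delete remote (s3) data
    tli.delete(&mut shared, false).await
}
```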
627 :
628 : /// Delete timeline content from remote storage. If the returned future is dropped,
629 : /// deletion will continue in the background.
630 : ///
631 : /// This function ordinarily spawns a task and stashes a result receiver into [`Self::remote_deletion`]. If
632 : /// deletion is already happening, it may simply wait for an existing task's result.
633 : ///
634 : /// Note: we concurrently delete remote storage data from multiple
635 : /// safekeepers. That's ok: s3 replies 200 if the object doesn't exist, and we
636 : /// do some retries anyway.
637 0 : async fn remote_delete(&self) -> Result<()> {
638 : // We will start a background task to do the deletion, so that it proceeds even if our
639 : // API request is dropped. Future requests will see the existing deletion task and wait
640 : // for it to complete.
641 0 : let mut result_rx = {
642 0 : let mut remote_deletion_state = self.remote_deletion.lock().unwrap();
643 0 : let result_rx = if let Some(result_rx) = remote_deletion_state.as_ref() {
644 0 : if let Some(result) = result_rx.borrow().as_ref() {
645 0 : if let Err(e) = result {
646 : // A previous remote deletion failed: we will start a new one
647 0 : tracing::error!("remote deletion failed, will retry ({e})");
648 0 : None
649 : } else {
650 : // A previous remote deletion call already succeeded
651 0 : return Ok(());
652 : }
653 : } else {
654 : // Remote deletion is still in flight
655 0 : Some(result_rx.clone())
656 : }
657 : } else {
658 : // Remote deletion was not attempted yet, start it now.
659 0 : None
660 : };
661 :
662 0 : match result_rx {
663 0 : Some(result_rx) => result_rx,
664 0 : None => self.start_remote_delete(&mut remote_deletion_state),
665 : }
666 : };
667 :
668 : // Wait for a result
669 0 : let Ok(result) = result_rx.wait_for(|v| v.is_some()).await else {
670 : // Unexpected: sender should always send a result before dropping the channel, even if it has an error
671 0 : return Err(anyhow::anyhow!(
672 0 : "remote deletion task future was dropped without sending a result"
673 0 : ));
674 : };
675 :
676 0 : result
677 0 : .as_ref()
678 0 : .expect("We did a wait_for on this being Some above")
679 0 : .as_ref()
680 0 : .map(|_| ())
681 0 : .map_err(|e| anyhow::anyhow!("remote deletion failed: {e}"))
682 0 : }
683 :
684 : /// Spawn background task to do remote deletion, return a receiver for its outcome
685 0 : fn start_remote_delete(
686 0 : &self,
687 0 : guard: &mut std::sync::MutexGuard<Option<RemoteDeletionReceiver>>,
688 0 : ) -> RemoteDeletionReceiver {
689 0 : tracing::info!("starting remote deletion");
690 0 : let storage = self.wal_backup.get_storage().clone();
691 0 : let (result_tx, result_rx) = tokio::sync::watch::channel(None);
692 0 : let ttid = self.ttid;
693 0 : tokio::task::spawn(
694 0 : async move {
695 0 : let r = if let Some(storage) = storage {
696 0 : wal_backup::delete_timeline(&storage, &ttid).await
697 : } else {
698 0 : tracing::info!(
699 0 : "skipping remote deletion because no remote storage is configured; this effectively leaks the objects in remote storage"
700 : );
701 0 : Ok(())
702 : };
703 :
704 0 : if let Err(e) = &r {
705 : // Log error here in case nobody ever listens for our result (e.g. dropped API request)
706 0 : tracing::error!("remote deletion failed: {e}");
707 0 : }
708 :
709 : // Ignore send results: it's legal for the Timeline to give up waiting for us.
710 0 : let _ = result_tx.send(Some(r));
711 0 : }
712 0 : .instrument(info_span!("remote_delete", timeline = %self.ttid)),
713 : );
714 :
715 0 : **guard = Some(result_rx.clone());
716 0 :
717 0 : result_rx
718 0 : }
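The publish-once idiom used by `start_remote_delete` is generic: a `watch` channel holding `Option<Result>`, a task that sends exactly one `Some`, and callers that `wait_for` it. A minimal standalone sketch:

```rust
use tokio::sync::watch;

// Minimal sketch of the publish-once pattern: the spawned task sends one
// Some(result); any number of receivers can wait for it (see remote_delete).
fn spawn_publish_once() -> watch::Receiver<Option<anyhow::Result<()>>> {
    let (tx, rx) = watch::channel(None);
    tokio::spawn(async move {
        let result: anyhow::Result<()> = Ok(()); // real work would go here
        // Ignore send errors: all receivers may legally have gone away.
        let _ = tx.send(Some(result));
    });
    rx
}
```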
719 :
720 : /// Returns whether the timeline is cancelled.
721 2505 : pub fn is_cancelled(&self) -> bool {
722 2505 : self.cancel.is_cancelled()
723 2505 : }
724 :
725 : /// Take a write (mutually exclusive) lock on the timeline shared_state.
726 1254 : pub async fn write_shared_state(self: &Arc<Self>) -> WriteGuardSharedState<'_> {
727 1254 : WriteGuardSharedState::new(self.clone(), self.mutex.write().await)
728 1254 : }
729 :
730 62 : pub async fn read_shared_state(&self) -> ReadGuardSharedState {
731 62 : self.mutex.read().await
732 62 : }
733 :
734 : /// Returns commit_lsn watch channel.
735 5 : pub fn get_commit_lsn_watch_rx(&self) -> watch::Receiver<Lsn> {
736 5 : self.commit_lsn_watch_rx.clone()
737 5 : }
738 :
739 : /// Returns term_flush_lsn watch channel.
740 0 : pub fn get_term_flush_lsn_watch_rx(&self) -> watch::Receiver<TermLsn> {
741 0 : self.term_flush_lsn_watch_rx.clone()
742 0 : }
743 :
744 : /// Returns watch channel for SharedState update version.
745 5 : pub fn get_state_version_rx(&self) -> watch::Receiver<usize> {
746 5 : self.shared_state_version_rx.clone()
747 5 : }
748 :
749 : /// Returns wal_seg_size.
750 5 : pub async fn get_wal_seg_size(&self) -> usize {
751 5 : self.read_shared_state().await.get_wal_seg_size()
752 5 : }
753 :
754 : /// Returns state of the timeline.
755 9 : pub async fn get_state(&self) -> (TimelineMemState, TimelinePersistentState) {
756 9 : let state = self.read_shared_state().await;
757 9 : (
758 9 : state.sk.state().inmem.clone(),
759 9 : TimelinePersistentState::clone(state.sk.state()),
760 9 : )
761 9 : }
762 :
763 : /// Returns latest backup_lsn.
764 0 : pub async fn get_wal_backup_lsn(&self) -> Lsn {
765 0 : self.read_shared_state().await.sk.state().inmem.backup_lsn
766 0 : }
767 :
768 : /// Sets backup_lsn to the given value.
769 0 : pub async fn set_wal_backup_lsn(self: &Arc<Self>, backup_lsn: Lsn) -> Result<()> {
770 0 : if self.is_cancelled() {
771 0 : bail!(TimelineError::Cancelled(self.ttid));
772 0 : }
773 :
774 0 : let mut state = self.write_shared_state().await;
775 0 : state.sk.state_mut().inmem.backup_lsn = max(state.sk.state().inmem.backup_lsn, backup_lsn);
776 0 : // we should check whether to shut down offloader, but this will be done
777 0 : // soon by peer communication anyway.
778 0 : Ok(())
779 0 : }
780 :
781 : /// Get safekeeper info for broadcasting to broker and other peers.
782 0 : pub async fn get_safekeeper_info(&self, conf: &SafeKeeperConf) -> SafekeeperTimelineInfo {
783 0 : let standby_apply_lsn = self.walsenders.get_hotstandby().reply.apply_lsn;
784 0 : let shared_state = self.read_shared_state().await;
785 0 : shared_state.get_safekeeper_info(&self.ttid, conf, standby_apply_lsn)
786 0 : }
787 :
788 : /// Update timeline state with peer safekeeper data.
789 0 : pub async fn record_safekeeper_info(
790 0 : self: &Arc<Self>,
791 0 : sk_info: SafekeeperTimelineInfo,
792 0 : ) -> Result<()> {
793 : {
794 0 : let mut shared_state = self.write_shared_state().await;
795 0 : shared_state.sk.record_safekeeper_info(&sk_info).await?;
796 0 : let peer_info = peer_info_from_sk_info(&sk_info, Instant::now());
797 0 : shared_state.peers_info.upsert(&peer_info);
798 0 : }
799 0 : Ok(())
800 0 : }
801 :
802 0 : pub async fn get_peers(&self, conf: &SafeKeeperConf) -> Vec<PeerInfo> {
803 0 : let shared_state = self.read_shared_state().await;
804 0 : shared_state.get_peers(conf.heartbeat_timeout)
805 0 : }
806 :
807 5 : pub fn get_walsenders(&self) -> &Arc<WalSenders> {
808 5 : &self.walsenders
809 5 : }
810 :
811 15 : pub fn get_walreceivers(&self) -> &Arc<WalReceivers> {
812 15 : &self.walreceivers
813 15 : }
814 :
815 : /// Returns flush_lsn.
816 0 : pub async fn get_flush_lsn(&self) -> Lsn {
817 0 : self.read_shared_state().await.sk.flush_lsn()
818 0 : }
819 :
820 : /// Gather timeline data for metrics.
821 0 : pub async fn info_for_metrics(&self) -> Option<FullTimelineInfo> {
822 0 : if self.is_cancelled() {
823 0 : return None;
824 0 : }
825 0 :
826 0 : let WalSendersTimelineMetricValues {
827 0 : ps_feedback_counter,
828 0 : last_ps_feedback,
829 0 : interpreted_wal_reader_tasks,
830 0 : } = self.walsenders.info_for_metrics();
831 :
832 0 : let state = self.read_shared_state().await;
833 0 : Some(FullTimelineInfo {
834 0 : ttid: self.ttid,
835 0 : ps_feedback_count: ps_feedback_counter,
836 0 : last_ps_feedback,
837 0 : wal_backup_active: self.wal_backup_active.load(Ordering::Relaxed),
838 0 : timeline_is_active: self.broker_active.load(Ordering::Relaxed),
839 0 : num_computes: self.walreceivers.get_num() as u32,
840 0 : last_removed_segno: self.last_removed_segno.load(Ordering::Relaxed),
841 0 : interpreted_wal_reader_tasks,
842 0 : epoch_start_lsn: state.sk.term_start_lsn(),
843 0 : mem_state: state.sk.state().inmem.clone(),
844 0 : persisted_state: TimelinePersistentState::clone(state.sk.state()),
845 0 : flush_lsn: state.sk.flush_lsn(),
846 0 : wal_storage: state.sk.wal_storage_metrics(),
847 0 : })
848 0 : }
849 :
850 : /// Returns in-memory timeline state to build a full debug dump.
851 0 : pub async fn memory_dump(&self) -> debug_dump::Memory {
852 0 : let state = self.read_shared_state().await;
853 :
854 0 : let (write_lsn, write_record_lsn, flush_lsn, file_open) =
855 0 : state.sk.wal_storage_internal_state();
856 0 :
857 0 : debug_dump::Memory {
858 0 : is_cancelled: self.is_cancelled(),
859 0 : peers_info_len: state.peers_info.0.len(),
860 0 : walsenders: self.walsenders.get_all_public(),
861 0 : wal_backup_active: self.wal_backup_active.load(Ordering::Relaxed),
862 0 : active: self.broker_active.load(Ordering::Relaxed),
863 0 : num_computes: self.walreceivers.get_num() as u32,
864 0 : last_removed_segno: self.last_removed_segno.load(Ordering::Relaxed),
865 0 : epoch_start_lsn: state.sk.term_start_lsn(),
866 0 : mem_state: state.sk.state().inmem.clone(),
867 0 : mgr_status: self.mgr_status.get(),
868 0 : write_lsn,
869 0 : write_record_lsn,
870 0 : flush_lsn,
871 0 : file_open,
872 0 : }
873 0 : }
874 :
875 : /// Apply a function to the control file state and persist it.
876 0 : pub async fn map_control_file<T>(
877 0 : self: &Arc<Self>,
878 0 : f: impl FnOnce(&mut TimelinePersistentState) -> Result<T>,
879 0 : ) -> Result<T> {
880 0 : let mut state = self.write_shared_state().await;
881 0 : let mut persistent_state = state.sk.state_mut().start_change();
882 : // If f returns error, we abort the change and don't persist anything.
883 0 : let res = f(&mut persistent_state)?;
884 : // If persisting fails, we abort the change and return error.
885 0 : state
886 0 : .sk
887 0 : .state_mut()
888 0 : .finish_change(&persistent_state)
889 0 : .await?;
890 0 : Ok(res)
891 0 : }
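A hypothetical caller, for illustration: advance a persisted LSN atomically. If the closure or the flush fails, nothing is persisted (`commit_lsn` is a real field of `TimelinePersistentState`; the helper itself is not part of this module).

```rust
// Hypothetical usage of map_control_file: mutate, then persist in one step.
async fn bump_commit_lsn(
    tli: &std::sync::Arc<Timeline>,
    lsn: utils::lsn::Lsn,
) -> anyhow::Result<()> {
    tli.map_control_file(|state| {
        state.commit_lsn = std::cmp::max(state.commit_lsn, lsn);
        Ok(())
    })
    .await
}
```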
892 :
893 0 : pub async fn term_bump(self: &Arc<Self>, to: Option<Term>) -> Result<TimelineTermBumpResponse> {
894 0 : let mut state = self.write_shared_state().await;
895 0 : state.sk.term_bump(to).await
896 0 : }
897 :
898 0 : pub async fn membership_switch(
899 0 : self: &Arc<Self>,
900 0 : to: Configuration,
901 0 : ) -> Result<TimelineMembershipSwitchResponse> {
902 0 : let mut state = self.write_shared_state().await;
903 0 : state.sk.membership_switch(to).await
904 0 : }
905 :
906 : /// Guts of [`Self::wal_residence_guard`] and [`Self::try_wal_residence_guard`]
907 10 : async fn do_wal_residence_guard(
908 10 : self: &Arc<Self>,
909 10 : block: bool,
910 10 : ) -> Result<Option<WalResidentTimeline>> {
911 10 : let op_label = if block {
912 10 : "wal_residence_guard"
913 : } else {
914 0 : "try_wal_residence_guard"
915 : };
916 :
917 10 : if self.is_cancelled() {
918 0 : bail!(TimelineError::Cancelled(self.ttid));
919 10 : }
920 10 :
921 10 : debug!("requesting WalResidentTimeline guard");
922 10 : let started_at = Instant::now();
923 10 : let status_before = self.mgr_status.get();
924 :
925 : // Wait 30 seconds for the guard to be acquired. It can time out if someone is
926 : // holding the lock (e.g. during `SafeKeeper::process_msg()`) or the manager
927 : // task is stuck.
928 10 : let res = tokio::time::timeout_at(started_at + Duration::from_secs(30), async {
929 10 : if block {
930 10 : self.manager_ctl.wal_residence_guard().await.map(Some)
931 : } else {
932 0 : self.manager_ctl.try_wal_residence_guard().await
933 : }
934 10 : })
935 10 : .await;
936 :
937 10 : let guard = match res {
938 10 : Ok(Ok(guard)) => {
939 10 : let finished_at = Instant::now();
940 10 : let elapsed = finished_at - started_at;
941 10 : MISC_OPERATION_SECONDS
942 10 : .with_label_values(&[op_label])
943 10 : .observe(elapsed.as_secs_f64());
944 10 :
945 10 : guard
946 : }
947 0 : Ok(Err(e)) => {
948 0 : warn!(
949 0 : "error acquiring {op_label} guard, statuses {:?} => {:?}",
950 0 : status_before,
951 0 : self.mgr_status.get()
952 : );
953 0 : return Err(e);
954 : }
955 : Err(_) => {
956 0 : warn!(
957 0 : "timeout acquiring {op_label} guard, statuses {:?} => {:?}",
958 0 : status_before,
959 0 : self.mgr_status.get()
960 : );
961 0 : anyhow::bail!("timeout while acquiring WalResidentTimeline guard");
962 : }
963 : };
964 :
965 10 : Ok(guard.map(|g| WalResidentTimeline::new(self.clone(), g)))
966 10 : }
967 :
968 : /// Get the timeline guard for reading/writing WAL files.
969 : /// If WAL files are not present on disk (evicted), they will be automatically
970 : /// downloaded from remote storage. This is done in the manager task, which is
971 : /// responsible for issuing all guards.
972 : ///
973 : /// NB: don't use this function from timeline_manager, it will deadlock.
974 : /// NB: don't use this function while holding shared_state lock.
975 10 : pub async fn wal_residence_guard(self: &Arc<Self>) -> Result<WalResidentTimeline> {
976 10 : self.do_wal_residence_guard(true)
977 10 : .await
978 10 : .map(|m| m.expect("Always get Some in block=true mode"))
979 10 : }
980 :
981 : /// Get the timeline guard for reading/writing WAL files if the timeline is resident,
982 : /// else returns None.
983 0 : pub(crate) async fn try_wal_residence_guard(
984 0 : self: &Arc<Self>,
985 0 : ) -> Result<Option<WalResidentTimeline>> {
986 0 : self.do_wal_residence_guard(false).await
987 0 : }
988 :
989 0 : pub async fn backup_partial_reset(self: &Arc<Self>) -> Result<Vec<String>> {
990 0 : self.manager_ctl.backup_partial_reset().await
991 0 : }
992 : }
993 :
994 : /// This is a guard that allows reading/writing the on-disk timeline state.
995 : /// All tasks that read/write WAL from disk should use this guard.
996 : pub struct WalResidentTimeline {
997 : pub tli: Arc<Timeline>,
998 : _guard: ResidenceGuard,
999 : }
1000 :
1001 : impl WalResidentTimeline {
1002 15 : pub fn new(tli: Arc<Timeline>, _guard: ResidenceGuard) -> Self {
1003 15 : WalResidentTimeline { tli, _guard }
1004 15 : }
1005 : }
1006 :
1007 : impl Deref for WalResidentTimeline {
1008 : type Target = Arc<Timeline>;
1009 :
1010 5666 : fn deref(&self) -> &Self::Target {
1011 5666 : &self.tli
1012 5666 : }
1013 : }
1014 :
1015 : impl WalResidentTimeline {
1016 : /// Returns true if the walsender should stop sending WAL to the pageserver. We
1017 : /// terminate it if remote_consistent_lsn has reached commit_lsn and there are no
1018 : /// computes. While there might already be nothing to stream, we learn about
1019 : /// remote_consistent_lsn updates through replication feedback, and we want
1020 : /// to stop pushing to the broker once the pageserver is fully caught up.
1021 0 : pub async fn should_walsender_stop(&self, reported_remote_consistent_lsn: Lsn) -> bool {
1022 0 : if self.is_cancelled() {
1023 0 : return true;
1024 0 : }
1025 0 : let shared_state = self.read_shared_state().await;
1026 0 : if self.walreceivers.get_num() == 0 {
1027 0 : return shared_state.sk.state().inmem.commit_lsn == Lsn(0) || // no data at all yet
1028 0 : reported_remote_consistent_lsn >= shared_state.sk.state().inmem.commit_lsn;
1029 0 : }
1030 0 : false
1031 0 : }
1032 :
1033 : /// Ensure that the current term is t, erroring otherwise, and lock the state.
1034 0 : pub async fn acquire_term(&self, t: Term) -> Result<ReadGuardSharedState> {
1035 0 : let ss = self.read_shared_state().await;
1036 0 : if ss.sk.state().acceptor_state.term != t {
1037 0 : bail!(
1038 0 : "failed to acquire term {}, current term {}",
1039 0 : t,
1040 0 : ss.sk.state().acceptor_state.term
1041 0 : );
1042 0 : }
1043 0 : Ok(ss)
1044 0 : }
1045 :
1046 : /// Pass an arrived message to the safekeeper.
1047 1240 : pub async fn process_msg(
1048 1240 : &self,
1049 1240 : msg: &ProposerAcceptorMessage,
1050 1240 : ) -> Result<Option<AcceptorProposerMessage>> {
1051 1240 : if self.is_cancelled() {
1052 0 : bail!(TimelineError::Cancelled(self.ttid));
1053 1240 : }
1054 :
1055 : let mut rmsg: Option<AcceptorProposerMessage>;
1056 : {
1057 1240 : let mut shared_state = self.write_shared_state().await;
1058 1240 : rmsg = shared_state.sk.safekeeper().process_msg(msg).await?;
1059 :
1060 : // if this is AppendResponse, fill in proper hot standby feedback.
1061 620 : if let Some(AcceptorProposerMessage::AppendResponse(ref mut resp)) = rmsg {
1062 620 : resp.hs_feedback = self.walsenders.get_hotstandby().hs_feedback;
1063 620 : }
1064 : }
1065 1240 : Ok(rmsg)
1066 1240 : }
1067 :
1068 9 : pub async fn get_walreader(&self, start_lsn: Lsn) -> Result<WalReader> {
1069 9 : let (_, persisted_state) = self.get_state().await;
1070 :
1071 9 : WalReader::new(
1072 9 : &self.ttid,
1073 9 : self.timeline_dir.clone(),
1074 9 : &persisted_state,
1075 9 : start_lsn,
1076 9 : self.wal_backup.clone(),
1077 9 : )
1078 9 : }
1079 :
1080 0 : pub fn get_timeline_dir(&self) -> Utf8PathBuf {
1081 0 : self.timeline_dir.clone()
1082 0 : }
1083 :
1084 : /// Update the in-memory remote_consistent_lsn.
1085 0 : pub async fn update_remote_consistent_lsn(&self, candidate: Lsn) {
1086 0 : let mut shared_state = self.write_shared_state().await;
1087 0 : shared_state.sk.state_mut().inmem.remote_consistent_lsn = max(
1088 0 : shared_state.sk.state().inmem.remote_consistent_lsn,
1089 0 : candidate,
1090 0 : );
1091 0 : }
1092 : }
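A sketch of the typical read path: hold the residence guard for the whole lifetime of the reader so the manager cannot evict WAL files underneath it. The exact `WalReader::read` signature is an assumption here.

```rust
// Sketch: acquire residence (may download evicted WAL), then read from disk.
async fn read_some_wal(
    tli: &std::sync::Arc<Timeline>,
    start: utils::lsn::Lsn,
) -> anyhow::Result<usize> {
    let resident = tli.wal_residence_guard().await?;
    let mut reader = resident.get_walreader(start).await?;
    let mut buf = vec![0u8; 8192];
    // Assumption: WalReader exposes an async read(&mut [u8]) -> Result<usize>.
    let n = reader.read(&mut buf).await?;
    Ok(n)
}
```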
1093 :
1094 : /// This struct contains methods that are used by the timeline manager task.
1095 : pub(crate) struct ManagerTimeline {
1096 : pub(crate) tli: Arc<Timeline>,
1097 : }
1098 :
1099 : impl Deref for ManagerTimeline {
1100 : type Target = Arc<Timeline>;
1101 :
1102 466 : fn deref(&self) -> &Self::Target {
1103 466 : &self.tli
1104 466 : }
1105 : }
1106 :
1107 : impl ManagerTimeline {
1108 0 : pub(crate) fn timeline_dir(&self) -> &Utf8PathBuf {
1109 0 : &self.tli.timeline_dir
1110 0 : }
1111 :
1112 : /// Manager requests this state on startup.
1113 5 : pub(crate) async fn bootstrap_mgr(&self) -> (bool, Option<PartialRemoteSegment>) {
1114 5 : let shared_state = self.read_shared_state().await;
1115 5 : let is_offloaded = matches!(
1116 5 : shared_state.sk.state().eviction_state,
1117 : EvictionState::Offloaded(_)
1118 : );
1119 5 : let partial_backup_uploaded = shared_state.sk.state().partial_backup.uploaded_segment();
1120 5 :
1121 5 : (is_offloaded, partial_backup_uploaded)
1122 5 : }
1123 :
1124 : /// Try to switch state Present->Offloaded.
1125 0 : pub(crate) async fn switch_to_offloaded(
1126 0 : &self,
1127 0 : partial: &PartialRemoteSegment,
1128 0 : ) -> anyhow::Result<()> {
1129 0 : let mut shared = self.write_shared_state().await;
1130 :
1131 : // updating control file
1132 0 : let mut pstate = shared.sk.state_mut().start_change();
1133 :
1134 0 : if !matches!(pstate.eviction_state, EvictionState::Present) {
1135 0 : bail!(
1136 0 : "cannot switch to offloaded state, current state is {:?}",
1137 0 : pstate.eviction_state
1138 0 : );
1139 0 : }
1140 0 :
1141 0 : if partial.flush_lsn != shared.sk.flush_lsn() {
1142 0 : bail!(
1143 0 : "flush_lsn mismatch in partial backup, expected {}, got {}",
1144 0 : shared.sk.flush_lsn(),
1145 0 : partial.flush_lsn
1146 0 : );
1147 0 : }
1148 0 :
1149 0 : if partial.commit_lsn != pstate.commit_lsn {
1150 0 : bail!(
1151 0 : "commit_lsn mismatch in partial backup, expected {}, got {}",
1152 0 : pstate.commit_lsn,
1153 0 : partial.commit_lsn
1154 0 : );
1155 0 : }
1156 0 :
1157 0 : if partial.term != shared.sk.last_log_term() {
1158 0 : bail!(
1159 0 : "term mismatch in partial backup, expected {}, got {}",
1160 0 : shared.sk.last_log_term(),
1161 0 : partial.term
1162 0 : );
1163 0 : }
1164 0 :
1165 0 : pstate.eviction_state = EvictionState::Offloaded(shared.sk.flush_lsn());
1166 0 : shared.sk.state_mut().finish_change(&pstate).await?;
1167 : // control file is now switched to Offloaded state
1168 :
1169 : // now we can switch shared.sk to Offloaded, shouldn't fail
1170 0 : let prev_sk = std::mem::replace(&mut shared.sk, StateSK::Empty);
1171 0 : let cfile_state = prev_sk.take_state();
1172 0 : shared.sk = StateSK::Offloaded(Box::new(cfile_state));
1173 0 :
1174 0 : Ok(())
1175 0 : }
1176 :
1177 : /// Try to switch state Offloaded->Present.
1178 0 : pub(crate) async fn switch_to_present(&self) -> anyhow::Result<()> {
1179 0 : let mut shared = self.write_shared_state().await;
1180 :
1181 : // trying to restore WAL storage
1182 0 : let wal_store = wal_storage::PhysicalStorage::new(
1183 0 : &self.ttid,
1184 0 : &self.timeline_dir,
1185 0 : shared.sk.state(),
1186 0 : self.conf.no_sync,
1187 0 : )?;
1188 :
1189 : // updating control file
1190 0 : let mut pstate = shared.sk.state_mut().start_change();
1191 :
1192 0 : if !matches!(pstate.eviction_state, EvictionState::Offloaded(_)) {
1193 0 : bail!(
1194 0 : "cannot switch to present state, current state is {:?}",
1195 0 : pstate.eviction_state
1196 0 : );
1197 0 : }
1198 0 :
1199 0 : if wal_store.flush_lsn() != shared.sk.flush_lsn() {
1200 0 : bail!(
1201 0 : "flush_lsn mismatch in restored WAL, expected {}, got {}",
1202 0 : shared.sk.flush_lsn(),
1203 0 : wal_store.flush_lsn()
1204 0 : );
1205 0 : }
1206 0 :
1207 0 : pstate.eviction_state = EvictionState::Present;
1208 0 : shared.sk.state_mut().finish_change(&pstate).await?;
1209 :
1210 : // now we can switch shared.sk to Present, shouldn't fail
1211 0 : let prev_sk = std::mem::replace(&mut shared.sk, StateSK::Empty);
1212 0 : let cfile_state = prev_sk.take_state();
1213 0 : shared.sk = StateSK::Loaded(SafeKeeper::new(cfile_state, wal_store, self.conf.my_id)?);
1214 :
1215 0 : Ok(())
1216 0 : }
1217 :
1218 : /// Update current manager state, useful for debugging manager deadlocks.
1219 250 : pub(crate) fn set_status(&self, status: timeline_manager::Status) {
1220 250 : self.mgr_status.store(status, Ordering::Relaxed);
1221 250 : }
1222 : }
1223 :
1224 : /// Deletes a directory and its contents. Returns false if the directory does not exist.
1225 0 : pub async fn delete_dir(path: &Utf8PathBuf) -> Result<bool> {
1226 0 : match fs::remove_dir_all(path).await {
1227 0 : Ok(_) => Ok(true),
1228 0 : Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(false),
1229 0 : Err(e) => Err(e.into()),
1230 : }
1231 0 : }
1232 :
1233 : /// Get a path to the tenant directory. If you just need to get a timeline directory,
1234 : /// use WalResidentTimeline::get_timeline_dir instead.
1235 10 : pub fn get_tenant_dir(conf: &SafeKeeperConf, tenant_id: &TenantId) -> Utf8PathBuf {
1236 10 : conf.workdir.join(tenant_id.to_string())
1237 10 : }
1238 :
1239 : /// Get a path to the timeline directory. If you need to read WAL files from disk,
1240 : /// use WalResidentTimeline::get_timeline_dir instead. This function does not check
1241 : /// timeline eviction status and WAL files might not be present on disk.
1242 10 : pub fn get_timeline_dir(conf: &SafeKeeperConf, ttid: &TenantTimelineId) -> Utf8PathBuf {
1243 10 : get_tenant_dir(conf, &ttid.tenant_id).join(ttid.timeline_id.to_string())
1244 10 : }
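For illustration, the resulting on-disk layout is `<workdir>/<tenant_id>/<timeline_id>`; the hypothetical check below just restates how the two helpers compose.

```rust
// Sketch: get_timeline_dir() composes get_tenant_dir() with the timeline id,
// so the result always lives under the configured workdir.
fn timeline_dir_is_under_workdir(conf: &SafeKeeperConf, ttid: &TenantTimelineId) -> bool {
    get_timeline_dir(conf, ttid).starts_with(&conf.workdir)
}
```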