Line data Source code
1 : //! This module implements Timeline lifecycle management and contains all the code
2 : //! needed to glue together SafeKeeper and the other background services.
3 :
4 : use std::cmp::max;
5 : use std::ops::{Deref, DerefMut};
6 : use std::sync::Arc;
7 : use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
8 : use std::time::Duration;
9 :
10 : use anyhow::{Result, anyhow, bail};
11 : use camino::{Utf8Path, Utf8PathBuf};
12 : use http_utils::error::ApiError;
13 : use remote_storage::RemotePath;
14 : use safekeeper_api::Term;
15 : use safekeeper_api::membership::Configuration;
16 : use safekeeper_api::models::{
17 : PeerInfo, TimelineMembershipSwitchResponse, TimelineTermBumpResponse,
18 : };
19 : use storage_broker::proto::{SafekeeperTimelineInfo, TenantTimelineId as ProtoTenantTimelineId};
20 : use tokio::fs::{self};
21 : use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard, watch};
22 : use tokio::time::Instant;
23 : use tokio_util::sync::CancellationToken;
24 : use tracing::*;
25 : use utils::id::{NodeId, TenantId, TenantTimelineId};
26 : use utils::lsn::Lsn;
27 : use utils::sync::gate::Gate;
28 :
29 : use crate::metrics::{FullTimelineInfo, MISC_OPERATION_SECONDS, WalStorageMetrics};
30 : use crate::rate_limit::RateLimiter;
31 : use crate::receive_wal::WalReceivers;
32 : use crate::safekeeper::{AcceptorProposerMessage, ProposerAcceptorMessage, SafeKeeper, TermLsn};
33 : use crate::send_wal::{WalSenders, WalSendersTimelineMetricValues};
34 : use crate::state::{EvictionState, TimelineMemState, TimelinePersistentState, TimelineState};
35 : use crate::timeline_guard::ResidenceGuard;
36 : use crate::timeline_manager::{AtomicStatus, ManagerCtl};
37 : use crate::timelines_set::TimelinesSet;
38 : use crate::wal_backup::{self, remote_timeline_path};
39 : use crate::wal_backup_partial::PartialRemoteSegment;
40 : use crate::wal_storage::{Storage as wal_storage_iface, WalReader};
41 : use crate::{SafeKeeperConf, control_file, debug_dump, timeline_manager, wal_storage};
42 :
43 0 : fn peer_info_from_sk_info(sk_info: &SafekeeperTimelineInfo, ts: Instant) -> PeerInfo {
44 0 : PeerInfo {
45 0 : sk_id: NodeId(sk_info.safekeeper_id),
46 0 : term: sk_info.term,
47 0 : last_log_term: sk_info.last_log_term,
48 0 : flush_lsn: Lsn(sk_info.flush_lsn),
49 0 : commit_lsn: Lsn(sk_info.commit_lsn),
50 0 : local_start_lsn: Lsn(sk_info.local_start_lsn),
51 0 : pg_connstr: sk_info.safekeeper_connstr.clone(),
52 0 : http_connstr: sk_info.http_connstr.clone(),
53 0 : ts,
54 0 : }
55 0 : }
56 :
57 : // Vector-based node id -> peer state map, with only the very limited
58 : // functionality we need.
59 : #[derive(Debug, Clone, Default)]
60 : pub struct PeersInfo(pub Vec<PeerInfo>);
61 :
62 : impl PeersInfo {
63 0 : fn get(&mut self, id: NodeId) -> Option<&mut PeerInfo> {
64 0 : self.0.iter_mut().find(|p| p.sk_id == id)
65 0 : }
66 :
67 0 : fn upsert(&mut self, p: &PeerInfo) {
68 0 : match self.get(p.sk_id) {
69 0 : Some(rp) => *rp = p.clone(),
70 0 : None => self.0.push(p.clone()),
71 : }
72 0 : }
73 : }
74 :
75 : pub type ReadGuardSharedState<'a> = RwLockReadGuard<'a, SharedState>;
76 :
77 : /// WriteGuardSharedState is a wrapper around `RwLockWriteGuard<SharedState>` that
78 : /// automatically updates `watch::Sender` channels with state on drop.
79 : pub struct WriteGuardSharedState<'a> {
80 : tli: Arc<Timeline>,
81 : guard: RwLockWriteGuard<'a, SharedState>,
82 : }
83 :
84 : impl<'a> WriteGuardSharedState<'a> {
85 1255 : fn new(tli: Arc<Timeline>, guard: RwLockWriteGuard<'a, SharedState>) -> Self {
86 1255 : WriteGuardSharedState { tli, guard }
87 1255 : }
88 : }
89 :
90 : impl Deref for WriteGuardSharedState<'_> {
91 : type Target = SharedState;
92 :
93 0 : fn deref(&self) -> &Self::Target {
94 0 : &self.guard
95 0 : }
96 : }
97 :
98 : impl DerefMut for WriteGuardSharedState<'_> {
99 1250 : fn deref_mut(&mut self) -> &mut Self::Target {
100 1250 : &mut self.guard
101 1250 : }
102 : }
103 :
104 : impl Drop for WriteGuardSharedState<'_> {
105 1255 : fn drop(&mut self) {
106 1255 : let term_flush_lsn =
107 1255 : TermLsn::from((self.guard.sk.last_log_term(), self.guard.sk.flush_lsn()));
108 1255 : let commit_lsn = self.guard.sk.state().inmem.commit_lsn;
109 1255 :
110 1255 : let _ = self.tli.term_flush_lsn_watch_tx.send_if_modified(|old| {
111 1255 : if *old != term_flush_lsn {
112 620 : *old = term_flush_lsn;
113 620 : true
114 : } else {
115 635 : false
116 : }
117 1255 : });
118 1255 :
119 1255 : let _ = self.tli.commit_lsn_watch_tx.send_if_modified(|old| {
120 1255 : if *old != commit_lsn {
121 615 : *old = commit_lsn;
122 615 : true
123 : } else {
124 640 : false
125 : }
126 1255 : });
127 1255 :
128 1255 : // send notification about shared state update
129 1255 : self.tli.shared_state_version_tx.send_modify(|old| {
130 1255 : *old += 1;
131 1255 : });
132 1255 : }
133 : }
134 :
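// A minimal sketch of how this Drop-based notification is typically consumed
// (hypothetical caller code, not compiled here): a writer mutates state through
// `write_shared_state()`, and when the guard goes out of scope the commit_lsn,
// term_flush_lsn and shared-state-version watch channels are updated, waking
// subscribers parked on `changed()`.
//
//     // writer side
//     {
//         let mut shared = tli.write_shared_state().await;
//         shared.sk.safekeeper(); // ...mutate state while holding the guard...
//     } // guard dropped here => watch channels observe the new values
//
//     // subscriber side
//     let mut commit_lsn_rx = tli.get_commit_lsn_watch_rx();
//     commit_lsn_rx.changed().await?; // wakes once commit_lsn moves
//     let commit_lsn = *commit_lsn_rx.borrow();
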
135 : /// This structure is stored in shared state and represents the state of the timeline.
136 : ///
137 : /// Usually it holds a SafeKeeper, but it also supports the offloaded timeline state. In
138 : /// that case the SafeKeeper is not available (because WAL is not present on disk) and all
139 : /// operations can be done only through the control file.
140 : pub enum StateSK {
141 : Loaded(SafeKeeper<control_file::FileStorage, wal_storage::PhysicalStorage>),
142 : Offloaded(Box<TimelineState<control_file::FileStorage>>),
143 : // Not used, required for moving between states.
144 : Empty,
145 : }
146 :
147 : impl StateSK {
148 2600 : pub fn flush_lsn(&self) -> Lsn {
149 2600 : match self {
150 2600 : StateSK::Loaded(sk) => sk.wal_store.flush_lsn(),
151 0 : StateSK::Offloaded(state) => match state.eviction_state {
152 0 : EvictionState::Offloaded(flush_lsn) => flush_lsn,
153 0 : _ => panic!("StateSK::Offloaded mismatches with eviction_state from control_file"),
154 : },
155 0 : StateSK::Empty => unreachable!(),
156 : }
157 2600 : }
158 :
159 : /// Get a reference to the control file's timeline state.
160 2631 : pub fn state(&self) -> &TimelineState<control_file::FileStorage> {
161 2631 : match self {
162 2631 : StateSK::Loaded(sk) => &sk.state,
163 0 : StateSK::Offloaded(s) => s,
164 0 : StateSK::Empty => unreachable!(),
165 : }
166 2631 : }
167 :
168 10 : pub fn state_mut(&mut self) -> &mut TimelineState<control_file::FileStorage> {
169 10 : match self {
170 10 : StateSK::Loaded(sk) => &mut sk.state,
171 0 : StateSK::Offloaded(s) => s,
172 0 : StateSK::Empty => unreachable!(),
173 : }
174 10 : }
175 :
176 1300 : pub fn last_log_term(&self) -> Term {
177 1300 : self.state()
178 1300 : .acceptor_state
179 1300 : .get_last_log_term(self.flush_lsn())
180 1300 : }
181 :
182 0 : pub async fn term_bump(&mut self, to: Option<Term>) -> Result<TimelineTermBumpResponse> {
183 0 : self.state_mut().term_bump(to).await
184 0 : }
185 :
186 0 : pub async fn membership_switch(
187 0 : &mut self,
188 0 : to: Configuration,
189 0 : ) -> Result<TimelineMembershipSwitchResponse> {
190 0 : self.state_mut().membership_switch(to).await
191 0 : }
192 :
193 : /// Close open WAL files to release FDs.
194 0 : fn close_wal_store(&mut self) {
195 0 : if let StateSK::Loaded(sk) = self {
196 0 : sk.wal_store.close();
197 0 : }
198 0 : }
199 :
200 : /// Update timeline state with peer safekeeper data.
201 0 : pub async fn record_safekeeper_info(&mut self, sk_info: &SafekeeperTimelineInfo) -> Result<()> {
202 0 : // update commit_lsn if safekeeper is loaded
203 0 : match self {
204 0 : StateSK::Loaded(sk) => sk.record_safekeeper_info(sk_info).await?,
205 0 : StateSK::Offloaded(_) => {}
206 0 : StateSK::Empty => unreachable!(),
207 : }
208 :
209 : // update everything else, including remote_consistent_lsn and backup_lsn
210 0 : let mut sync_control_file = false;
211 0 : let state = self.state_mut();
212 0 : let wal_seg_size = state.server.wal_seg_size as u64;
213 0 :
214 0 : state.inmem.backup_lsn = max(Lsn(sk_info.backup_lsn), state.inmem.backup_lsn);
215 0 : sync_control_file |= state.backup_lsn + wal_seg_size < state.inmem.backup_lsn;
216 0 :
217 0 : state.inmem.remote_consistent_lsn = max(
218 0 : Lsn(sk_info.remote_consistent_lsn),
219 0 : state.inmem.remote_consistent_lsn,
220 0 : );
221 0 : sync_control_file |=
222 0 : state.remote_consistent_lsn + wal_seg_size < state.inmem.remote_consistent_lsn;
223 0 :
224 0 : state.inmem.peer_horizon_lsn =
225 0 : max(Lsn(sk_info.peer_horizon_lsn), state.inmem.peer_horizon_lsn);
226 0 : sync_control_file |= state.peer_horizon_lsn + wal_seg_size < state.inmem.peer_horizon_lsn;
227 0 :
228 0 : if sync_control_file {
229 0 : state.flush().await?;
230 0 : }
231 0 : Ok(())
232 0 : }
233 :
234 : /// Previously known as epoch_start_lsn. Needed only for reference in some APIs.
235 0 : pub fn term_start_lsn(&self) -> Lsn {
236 0 : match self {
237 0 : StateSK::Loaded(sk) => sk.term_start_lsn,
238 0 : StateSK::Offloaded(_) => Lsn(0),
239 0 : StateSK::Empty => unreachable!(),
240 : }
241 0 : }
242 :
243 : /// Used for metrics only.
244 0 : pub fn wal_storage_metrics(&self) -> WalStorageMetrics {
245 0 : match self {
246 0 : StateSK::Loaded(sk) => sk.wal_store.get_metrics(),
247 0 : StateSK::Offloaded(_) => WalStorageMetrics::default(),
248 0 : StateSK::Empty => unreachable!(),
249 : }
250 0 : }
251 :
252 : /// Returns WAL storage internal LSNs for debug dump.
253 0 : pub fn wal_storage_internal_state(&self) -> (Lsn, Lsn, Lsn, bool) {
254 0 : match self {
255 0 : StateSK::Loaded(sk) => sk.wal_store.internal_state(),
256 : StateSK::Offloaded(_) => {
257 0 : let flush_lsn = self.flush_lsn();
258 0 : (flush_lsn, flush_lsn, flush_lsn, false)
259 : }
260 0 : StateSK::Empty => unreachable!(),
261 : }
262 0 : }
263 :
264 : /// Access to the SafeKeeper object. Panics if the timeline is offloaded; safe to use from WalResidentTimeline.
265 1240 : pub fn safekeeper(
266 1240 : &mut self,
267 1240 : ) -> &mut SafeKeeper<control_file::FileStorage, wal_storage::PhysicalStorage> {
268 1240 : match self {
269 1240 : StateSK::Loaded(sk) => sk,
270 : StateSK::Offloaded(_) => {
271 0 : panic!("safekeeper is offloaded, cannot be used")
272 : }
273 0 : StateSK::Empty => unreachable!(),
274 : }
275 1240 : }
276 :
277 : /// Moves control file's state structure out of the enum. Used to switch states.
278 0 : fn take_state(self) -> TimelineState<control_file::FileStorage> {
279 0 : match self {
280 0 : StateSK::Loaded(sk) => sk.state,
281 0 : StateSK::Offloaded(state) => *state,
282 0 : StateSK::Empty => unreachable!(),
283 : }
284 0 : }
285 : }
286 :
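// A minimal sketch of how callers are expected to dispatch on the variants
// (hypothetical code, not compiled here): control-file-only operations go
// through `state()` / `state_mut()` and work in both live states, while
// anything touching WAL requires the timeline to be resident.
//
//     match &shared.sk {
//         StateSK::Loaded(_) => { /* WAL on disk; `safekeeper()` is usable */ }
//         StateSK::Offloaded(_) => { /* control file only; `safekeeper()` would panic */ }
//         StateSK::Empty => unreachable!(),
//     }
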
287 : /// Shared state associated with database instance
288 : pub struct SharedState {
289 : /// Safekeeper object
290 : pub(crate) sk: StateSK,
291 : /// In memory list containing state of peers sent in latest messages from them.
292 : pub(crate) peers_info: PeersInfo,
293 : // A true value holds back old WAL removal; this is used by snapshotting. We
294 : // could make it a counter, but there is no need to.
295 : pub(crate) wal_removal_on_hold: bool,
296 : }
297 :
298 : impl SharedState {
299 : /// Creates a new SharedState.
300 5 : pub fn new(sk: StateSK) -> Self {
301 5 : Self {
302 5 : sk,
303 5 : peers_info: PeersInfo(vec![]),
304 5 : wal_removal_on_hold: false,
305 5 : }
306 5 : }
307 :
308 : /// Restore SharedState from the control file. Bails out if the file doesn't exist.
309 0 : pub fn restore(conf: &SafeKeeperConf, ttid: &TenantTimelineId) -> Result<Self> {
310 0 : let timeline_dir = get_timeline_dir(conf, ttid);
311 0 : let control_store = control_file::FileStorage::restore_new(&timeline_dir, conf.no_sync)?;
312 0 : if control_store.server.wal_seg_size == 0 {
313 0 : bail!(TimelineError::UninitializedWalSegSize(*ttid));
314 0 : }
315 :
316 0 : let sk = match control_store.eviction_state {
317 : EvictionState::Present => {
318 0 : let wal_store = wal_storage::PhysicalStorage::new(
319 0 : ttid,
320 0 : &timeline_dir,
321 0 : &control_store,
322 0 : conf.no_sync,
323 0 : )?;
324 0 : StateSK::Loaded(SafeKeeper::new(
325 0 : TimelineState::new(control_store),
326 0 : wal_store,
327 0 : conf.my_id,
328 0 : )?)
329 : }
330 : EvictionState::Offloaded(_) => {
331 0 : StateSK::Offloaded(Box::new(TimelineState::new(control_store)))
332 : }
333 : };
334 :
335 0 : Ok(Self::new(sk))
336 0 : }
337 :
338 5 : pub(crate) fn get_wal_seg_size(&self) -> usize {
339 5 : self.sk.state().server.wal_seg_size as usize
340 5 : }
341 :
342 0 : fn get_safekeeper_info(
343 0 : &self,
344 0 : ttid: &TenantTimelineId,
345 0 : conf: &SafeKeeperConf,
346 0 : standby_apply_lsn: Lsn,
347 0 : ) -> SafekeeperTimelineInfo {
348 0 : SafekeeperTimelineInfo {
349 0 : safekeeper_id: conf.my_id.0,
350 0 : tenant_timeline_id: Some(ProtoTenantTimelineId {
351 0 : tenant_id: ttid.tenant_id.as_ref().to_owned(),
352 0 : timeline_id: ttid.timeline_id.as_ref().to_owned(),
353 0 : }),
354 0 : term: self.sk.state().acceptor_state.term,
355 0 : last_log_term: self.sk.last_log_term(),
356 0 : flush_lsn: self.sk.flush_lsn().0,
357 0 : // note: this value is not flushed to control file yet and can be lost
358 0 : commit_lsn: self.sk.state().inmem.commit_lsn.0,
359 0 : remote_consistent_lsn: self.sk.state().inmem.remote_consistent_lsn.0,
360 0 : peer_horizon_lsn: self.sk.state().inmem.peer_horizon_lsn.0,
361 0 : safekeeper_connstr: conf
362 0 : .advertise_pg_addr
363 0 : .to_owned()
364 0 : .unwrap_or(conf.listen_pg_addr.clone()),
365 0 : http_connstr: conf.listen_http_addr.to_owned(),
366 0 : backup_lsn: self.sk.state().inmem.backup_lsn.0,
367 0 : local_start_lsn: self.sk.state().local_start_lsn.0,
368 0 : availability_zone: conf.availability_zone.clone(),
369 0 : standby_horizon: standby_apply_lsn.0,
370 0 : }
371 0 : }
372 :
373 : /// Get our latest view of the status of alive peers on the timeline.
374 : /// We pass our own info through the broker as well, so when we don't have a connection
375 : /// to the broker the returned vec is empty.
376 40 : pub(crate) fn get_peers(&self, heartbeat_timeout: Duration) -> Vec<PeerInfo> {
377 40 : let now = Instant::now();
378 40 : self.peers_info
379 40 : .0
380 40 : .iter()
381 40 : // Regard peer as absent if we haven't heard from it within heartbeat_timeout.
382 40 : .filter(|p| now.duration_since(p.ts) <= heartbeat_timeout)
383 40 : .cloned()
384 40 : .collect()
385 40 : }
386 : }
387 :
388 : #[derive(Debug, thiserror::Error)]
389 : pub enum TimelineError {
390 : #[error("Timeline {0} was cancelled and cannot be used anymore")]
391 : Cancelled(TenantTimelineId),
392 : #[error("Timeline {0} was not found in global map")]
393 : NotFound(TenantTimelineId),
394 : #[error("Timeline {0} creation is in progress")]
395 : CreationInProgress(TenantTimelineId),
396 : #[error("Timeline {0} exists on disk, but wasn't loaded on startup")]
397 : Invalid(TenantTimelineId),
398 : #[error("Timeline {0} is already exists")]
399 : AlreadyExists(TenantTimelineId),
400 : #[error("Timeline {0} is not initialized, wal_seg_size is zero")]
401 : UninitializedWalSegSize(TenantTimelineId),
402 : #[error("Timeline {0} is not initialized, pg_version is unknown")]
403 : UninitializedPgVersion(TenantTimelineId),
404 : }
405 :
406 : // Convert to HTTP API error.
407 : impl From<TimelineError> for ApiError {
408 0 : fn from(te: TimelineError) -> ApiError {
409 0 : match te {
410 0 : TimelineError::NotFound(ttid) => {
411 0 : ApiError::NotFound(anyhow!("timeline {} not found", ttid).into())
412 : }
413 0 : _ => ApiError::InternalServerError(anyhow!("{}", te)),
414 : }
415 0 : }
416 : }
417 :
418 : /// We run remote deletion in a background task; this is how it sends its result back.
419 : type RemoteDeletionReceiver = tokio::sync::watch::Receiver<Option<anyhow::Result<()>>>;
420 :
421 : /// Timeline struct manages lifecycle (creation, deletion, restore) of a safekeeper timeline.
422 : /// It also holds SharedState and provides mutually exclusive access to it.
423 : pub struct Timeline {
424 : pub ttid: TenantTimelineId,
425 : pub remote_path: RemotePath,
426 :
427 : /// Used to broadcast commit_lsn updates to all background jobs.
428 : commit_lsn_watch_tx: watch::Sender<Lsn>,
429 : commit_lsn_watch_rx: watch::Receiver<Lsn>,
430 :
431 : /// Broadcasts (current term, flush_lsn) updates; the walsender is interested in
432 : /// them when sending in recovery mode (to walproposer or peers). Note: this
433 : /// is just a notification; WAL reading should always be done with the lock held,
434 : /// as the term can change otherwise.
435 : term_flush_lsn_watch_tx: watch::Sender<TermLsn>,
436 : term_flush_lsn_watch_rx: watch::Receiver<TermLsn>,
437 :
438 : /// Broadcasts shared state updates.
439 : shared_state_version_tx: watch::Sender<usize>,
440 : shared_state_version_rx: watch::Receiver<usize>,
441 :
442 : /// Safekeeper and other state that should remain consistent and
443 : /// synchronized with the disk. This is a tokio RwLock as we write WAL to disk
444 : /// while holding it, ensuring that consensus checks are in order.
445 : mutex: RwLock<SharedState>,
446 : walsenders: Arc<WalSenders>,
447 : walreceivers: Arc<WalReceivers>,
448 : timeline_dir: Utf8PathBuf,
449 : manager_ctl: ManagerCtl,
450 : conf: Arc<SafeKeeperConf>,
451 :
452 : remote_deletion: std::sync::Mutex<Option<RemoteDeletionReceiver>>,
453 :
454 : /// Hold this gate from code that depends on the Timeline's non-shut-down state. While holding
455 : /// this gate, you must respect [`Timeline::cancel`]
456 : pub(crate) gate: Gate,
457 :
458 : /// Delete/cancel will trigger this, background tasks should drop out as soon as it fires
459 : pub(crate) cancel: CancellationToken,
460 :
461 : // timeline_manager controlled state
462 : pub(crate) broker_active: AtomicBool,
463 : pub(crate) wal_backup_active: AtomicBool,
464 : pub(crate) last_removed_segno: AtomicU64,
465 : pub(crate) mgr_status: AtomicStatus,
466 : }
467 :
468 : impl Timeline {
469 : /// Constructs a new timeline.
470 5 : pub fn new(
471 5 : ttid: TenantTimelineId,
472 5 : timeline_dir: &Utf8Path,
473 5 : remote_path: &RemotePath,
474 5 : shared_state: SharedState,
475 5 : conf: Arc<SafeKeeperConf>,
476 5 : ) -> Arc<Self> {
477 5 : let (commit_lsn_watch_tx, commit_lsn_watch_rx) =
478 5 : watch::channel(shared_state.sk.state().commit_lsn);
479 5 : let (term_flush_lsn_watch_tx, term_flush_lsn_watch_rx) = watch::channel(TermLsn::from((
480 5 : shared_state.sk.last_log_term(),
481 5 : shared_state.sk.flush_lsn(),
482 5 : )));
483 5 : let (shared_state_version_tx, shared_state_version_rx) = watch::channel(0);
484 5 :
485 5 : let walreceivers = WalReceivers::new();
486 5 :
487 5 : Arc::new(Self {
488 5 : ttid,
489 5 : remote_path: remote_path.to_owned(),
490 5 : timeline_dir: timeline_dir.to_owned(),
491 5 : commit_lsn_watch_tx,
492 5 : commit_lsn_watch_rx,
493 5 : term_flush_lsn_watch_tx,
494 5 : term_flush_lsn_watch_rx,
495 5 : shared_state_version_tx,
496 5 : shared_state_version_rx,
497 5 : mutex: RwLock::new(shared_state),
498 5 : walsenders: WalSenders::new(walreceivers.clone()),
499 5 : walreceivers,
500 5 : gate: Default::default(),
501 5 : cancel: CancellationToken::default(),
502 5 : remote_deletion: std::sync::Mutex::new(None),
503 5 : manager_ctl: ManagerCtl::new(),
504 5 : conf,
505 5 : broker_active: AtomicBool::new(false),
506 5 : wal_backup_active: AtomicBool::new(false),
507 5 : last_removed_segno: AtomicU64::new(0),
508 5 : mgr_status: AtomicStatus::new(),
509 5 : })
510 5 : }
511 :
512 : /// Load existing timeline from disk.
513 0 : pub fn load_timeline(
514 0 : conf: Arc<SafeKeeperConf>,
515 0 : ttid: TenantTimelineId,
516 0 : ) -> Result<Arc<Timeline>> {
517 0 : let _enter = info_span!("load_timeline", timeline = %ttid.timeline_id).entered();
518 :
519 0 : let shared_state = SharedState::restore(conf.as_ref(), &ttid)?;
520 0 : let timeline_dir = get_timeline_dir(conf.as_ref(), &ttid);
521 0 : let remote_path = remote_timeline_path(&ttid)?;
522 :
523 0 : Ok(Timeline::new(
524 0 : ttid,
525 0 : &timeline_dir,
526 0 : &remote_path,
527 0 : shared_state,
528 0 : conf,
529 0 : ))
530 0 : }
531 :
532 : /// Bootstrap a new or existing timeline, starting its background tasks.
533 5 : pub fn bootstrap(
534 5 : self: &Arc<Timeline>,
535 5 : _shared_state: &mut WriteGuardSharedState<'_>,
536 5 : conf: &SafeKeeperConf,
537 5 : broker_active_set: Arc<TimelinesSet>,
538 5 : partial_backup_rate_limiter: RateLimiter,
539 5 : ) {
540 5 : let (tx, rx) = self.manager_ctl.bootstrap_manager();
541 :
542 5 : let Ok(gate_guard) = self.gate.enter() else {
543 : // Init raced with shutdown
544 0 : return;
545 : };
546 :
547 : // Start manager task which will monitor timeline state and update
548 : // background tasks.
549 5 : tokio::spawn({
550 5 : let this = self.clone();
551 5 : let conf = conf.clone();
552 5 : async move {
553 5 : let _gate_guard = gate_guard;
554 5 : timeline_manager::main_task(
555 5 : ManagerTimeline { tli: this },
556 5 : conf,
557 5 : broker_active_set,
558 5 : tx,
559 5 : rx,
560 5 : partial_backup_rate_limiter,
561 5 : )
562 5 : .await
563 5 : }
564 5 : });
565 5 : }
566 :
567 : /// Cancel the timeline, requesting background activity to stop. Closing
568 : /// the `self.gate` waits for that.
569 0 : pub async fn cancel(&self) {
570 0 : info!("timeline {} shutting down", self.ttid);
571 0 : self.cancel.cancel();
572 0 : }
573 :
574 : /// Background timeline activities (which hold Timeline::gate) will no
575 : /// longer run once this function completes. `Self::cancel` must have been
576 : /// already called.
577 0 : pub async fn close(&self) {
578 0 : assert!(self.cancel.is_cancelled());
579 :
580 : // Wait for any concurrent tasks to stop using this timeline, to avoid e.g. attempts
581 : // to read deleted files.
582 0 : self.gate.close().await;
583 0 : }
584 :
585 : /// Delete the timeline from disk completely by removing the timeline directory.
586 : ///
587 : /// Also deletes WAL in s3. Might fail if e.g. s3 is unavailable, but the
588 : /// deletion API endpoint is retriable.
589 : ///
590 : /// The timeline must be in a shut-down state (i.e. call [`Self::close`] first).
591 0 : pub async fn delete(
592 0 : &self,
593 0 : shared_state: &mut WriteGuardSharedState<'_>,
594 0 : only_local: bool,
595 0 : ) -> Result<bool> {
596 0 : // Assert that [`Self::close`] was already called
597 0 : assert!(self.cancel.is_cancelled());
598 0 : assert!(self.gate.close_complete());
599 :
600 0 : info!("deleting timeline {} from disk", self.ttid);
601 :
602 : // Close associated FDs. Nobody will be able to touch timeline data once
603 : // it is cancelled, so WAL storage won't be opened again.
604 0 : shared_state.sk.close_wal_store();
605 0 :
606 0 : if !only_local && self.conf.is_wal_backup_enabled() {
607 0 : self.remote_delete().await?;
608 0 : }
609 0 : let dir_existed = delete_dir(&self.timeline_dir).await?;
610 0 : Ok(dir_existed)
611 0 : }
612 :
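// A minimal sketch of the expected shutdown-and-delete order (hypothetical
// caller code, not compiled here); `delete` asserts that cancellation and gate
// closing have already happened:
//
//     tli.cancel().await;                         // request background tasks to stop
//     tli.close().await;                          // wait for all gate holders to finish
//     let mut shared = tli.write_shared_state().await;
//     let dir_existed = tli.delete(&mut shared, /* only_local */ false).await?;
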
613 : /// Delete timeline content from remote storage. If the returned future is dropped,
614 : /// deletion will continue in the background.
615 : ///
616 : /// This function ordinarily spawns a task and stashes a result receiver into [`Self::remote_deletion`]. If
617 : /// deletion is already happening, it may simply wait for an existing task's result.
618 : ///
619 : /// Note: we concurrently delete remote storage data from multiple
620 : /// safekeepers. That's ok, s3 replies 200 if object doesn't exist and we
621 : /// do some retries anyway.
622 0 : async fn remote_delete(&self) -> Result<()> {
623 : // We will start a background task to do the deletion, so that it proceeds even if our
624 : // API request is dropped. Future requests will see the existing deletion task and wait
625 : // for it to complete.
626 0 : let mut result_rx = {
627 0 : let mut remote_deletion_state = self.remote_deletion.lock().unwrap();
628 0 : let result_rx = if let Some(result_rx) = remote_deletion_state.as_ref() {
629 0 : if let Some(result) = result_rx.borrow().as_ref() {
630 0 : if let Err(e) = result {
631 : // A previous remote deletion failed: we will start a new one
632 0 : tracing::error!("remote deletion failed, will retry ({e})");
633 0 : None
634 : } else {
635 : // A previous remote deletion call already succeeded
636 0 : return Ok(());
637 : }
638 : } else {
639 : // Remote deletion is still in flight
640 0 : Some(result_rx.clone())
641 : }
642 : } else {
643 : // Remote deletion was not attempted yet, start it now.
644 0 : None
645 : };
646 :
647 0 : match result_rx {
648 0 : Some(result_rx) => result_rx,
649 0 : None => self.start_remote_delete(&mut remote_deletion_state),
650 : }
651 : };
652 :
653 : // Wait for a result
654 0 : let Ok(result) = result_rx.wait_for(|v| v.is_some()).await else {
655 : // Unexpected: sender should always send a result before dropping the channel, even if it has an error
656 0 : return Err(anyhow::anyhow!(
657 0 : "remote deletion task future was dropped without sending a result"
658 0 : ));
659 : };
660 :
661 0 : result
662 0 : .as_ref()
663 0 : .expect("We did a wait_for on this being Some above")
664 0 : .as_ref()
665 0 : .map(|_| ())
666 0 : .map_err(|e| anyhow::anyhow!("remote deletion failed: {e}"))
667 0 : }
668 :
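// The result-publishing pattern used above, shown in isolation (a sketch with
// placeholder work, not the actual deletion code): the background task owns a
// `watch::Sender<Option<Result<()>>>`, callers clone the receiver, and
// `wait_for(|v| v.is_some())` blocks until the task has stored Ok or Err.
//
//     let (result_tx, result_rx) = tokio::sync::watch::channel(None);
//     tokio::task::spawn(async move {
//         let r: anyhow::Result<()> = Ok(()); // placeholder for the remote deletion
//         let _ = result_tx.send(Some(r));    // ignore errors: waiters may be gone
//     });
//     let mut rx = result_rx.clone();
//     let _outcome = rx.wait_for(|v| v.is_some()).await?;
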
669 : /// Spawn background task to do remote deletion, return a receiver for its outcome
670 0 : fn start_remote_delete(
671 0 : &self,
672 0 : guard: &mut std::sync::MutexGuard<Option<RemoteDeletionReceiver>>,
673 0 : ) -> RemoteDeletionReceiver {
674 0 : tracing::info!("starting remote deletion");
675 0 : let (result_tx, result_rx) = tokio::sync::watch::channel(None);
676 0 : let ttid = self.ttid;
677 0 : tokio::task::spawn(
678 0 : async move {
679 0 : let r = wal_backup::delete_timeline(&ttid).await;
680 0 : if let Err(e) = &r {
681 : // Log error here in case nobody ever listens for our result (e.g. dropped API request)
682 0 : tracing::error!("remote deletion failed: {e}");
683 0 : }
684 :
685 : // Ignore send results: it's legal for the Timeline to give up waiting for us.
686 0 : let _ = result_tx.send(Some(r));
687 0 : }
688 0 : .instrument(info_span!("remote_delete", timeline = %self.ttid)),
689 : );
690 :
691 0 : **guard = Some(result_rx.clone());
692 0 :
693 0 : result_rx
694 0 : }
695 :
696 : /// Returns if timeline is cancelled.
697 2505 : pub fn is_cancelled(&self) -> bool {
698 2505 : self.cancel.is_cancelled()
699 2505 : }
700 :
701 : /// Take an exclusive write lock on the timeline's shared_state.
702 1255 : pub async fn write_shared_state<'a>(self: &'a Arc<Self>) -> WriteGuardSharedState<'a> {
703 1255 : WriteGuardSharedState::new(self.clone(), self.mutex.write().await)
704 1255 : }
705 :
706 58 : pub async fn read_shared_state(&self) -> ReadGuardSharedState {
707 58 : self.mutex.read().await
708 58 : }
709 :
710 : /// Returns commit_lsn watch channel.
711 5 : pub fn get_commit_lsn_watch_rx(&self) -> watch::Receiver<Lsn> {
712 5 : self.commit_lsn_watch_rx.clone()
713 5 : }
714 :
715 : /// Returns term_flush_lsn watch channel.
716 0 : pub fn get_term_flush_lsn_watch_rx(&self) -> watch::Receiver<TermLsn> {
717 0 : self.term_flush_lsn_watch_rx.clone()
718 0 : }
719 :
720 : /// Returns watch channel for SharedState update version.
721 5 : pub fn get_state_version_rx(&self) -> watch::Receiver<usize> {
722 5 : self.shared_state_version_rx.clone()
723 5 : }
724 :
725 : /// Returns wal_seg_size.
726 5 : pub async fn get_wal_seg_size(&self) -> usize {
727 5 : self.read_shared_state().await.get_wal_seg_size()
728 5 : }
729 :
730 : /// Returns state of the timeline.
731 8 : pub async fn get_state(&self) -> (TimelineMemState, TimelinePersistentState) {
732 8 : let state = self.read_shared_state().await;
733 8 : (
734 8 : state.sk.state().inmem.clone(),
735 8 : TimelinePersistentState::clone(state.sk.state()),
736 8 : )
737 8 : }
738 :
739 : /// Returns latest backup_lsn.
740 0 : pub async fn get_wal_backup_lsn(&self) -> Lsn {
741 0 : self.read_shared_state().await.sk.state().inmem.backup_lsn
742 0 : }
743 :
744 : /// Sets backup_lsn to the given value.
745 0 : pub async fn set_wal_backup_lsn(self: &Arc<Self>, backup_lsn: Lsn) -> Result<()> {
746 0 : if self.is_cancelled() {
747 0 : bail!(TimelineError::Cancelled(self.ttid));
748 0 : }
749 :
750 0 : let mut state = self.write_shared_state().await;
751 0 : state.sk.state_mut().inmem.backup_lsn = max(state.sk.state().inmem.backup_lsn, backup_lsn);
752 0 : // we should check whether to shut down offloader, but this will be done
753 0 : // soon by peer communication anyway.
754 0 : Ok(())
755 0 : }
756 :
757 : /// Get safekeeper info for broadcasting to broker and other peers.
758 0 : pub async fn get_safekeeper_info(&self, conf: &SafeKeeperConf) -> SafekeeperTimelineInfo {
759 0 : let standby_apply_lsn = self.walsenders.get_hotstandby().reply.apply_lsn;
760 0 : let shared_state = self.read_shared_state().await;
761 0 : shared_state.get_safekeeper_info(&self.ttid, conf, standby_apply_lsn)
762 0 : }
763 :
764 : /// Update timeline state with peer safekeeper data.
765 0 : pub async fn record_safekeeper_info(
766 0 : self: &Arc<Self>,
767 0 : sk_info: SafekeeperTimelineInfo,
768 0 : ) -> Result<()> {
769 : {
770 0 : let mut shared_state = self.write_shared_state().await;
771 0 : shared_state.sk.record_safekeeper_info(&sk_info).await?;
772 0 : let peer_info = peer_info_from_sk_info(&sk_info, Instant::now());
773 0 : shared_state.peers_info.upsert(&peer_info);
774 0 : }
775 0 : Ok(())
776 0 : }
777 :
778 0 : pub async fn get_peers(&self, conf: &SafeKeeperConf) -> Vec<PeerInfo> {
779 0 : let shared_state = self.read_shared_state().await;
780 0 : shared_state.get_peers(conf.heartbeat_timeout)
781 0 : }
782 :
783 5 : pub fn get_walsenders(&self) -> &Arc<WalSenders> {
784 5 : &self.walsenders
785 5 : }
786 :
787 15 : pub fn get_walreceivers(&self) -> &Arc<WalReceivers> {
788 15 : &self.walreceivers
789 15 : }
790 :
791 : /// Returns flush_lsn.
792 0 : pub async fn get_flush_lsn(&self) -> Lsn {
793 0 : self.read_shared_state().await.sk.flush_lsn()
794 0 : }
795 :
796 : /// Gather timeline data for metrics.
797 0 : pub async fn info_for_metrics(&self) -> Option<FullTimelineInfo> {
798 0 : if self.is_cancelled() {
799 0 : return None;
800 0 : }
801 0 :
802 0 : let WalSendersTimelineMetricValues {
803 0 : ps_feedback_counter,
804 0 : last_ps_feedback,
805 0 : interpreted_wal_reader_tasks,
806 0 : } = self.walsenders.info_for_metrics();
807 :
808 0 : let state = self.read_shared_state().await;
809 0 : Some(FullTimelineInfo {
810 0 : ttid: self.ttid,
811 0 : ps_feedback_count: ps_feedback_counter,
812 0 : last_ps_feedback,
813 0 : wal_backup_active: self.wal_backup_active.load(Ordering::Relaxed),
814 0 : timeline_is_active: self.broker_active.load(Ordering::Relaxed),
815 0 : num_computes: self.walreceivers.get_num() as u32,
816 0 : last_removed_segno: self.last_removed_segno.load(Ordering::Relaxed),
817 0 : interpreted_wal_reader_tasks,
818 0 : epoch_start_lsn: state.sk.term_start_lsn(),
819 0 : mem_state: state.sk.state().inmem.clone(),
820 0 : persisted_state: TimelinePersistentState::clone(state.sk.state()),
821 0 : flush_lsn: state.sk.flush_lsn(),
822 0 : wal_storage: state.sk.wal_storage_metrics(),
823 0 : })
824 0 : }
825 :
826 : /// Returns in-memory timeline state to build a full debug dump.
827 0 : pub async fn memory_dump(&self) -> debug_dump::Memory {
828 0 : let state = self.read_shared_state().await;
829 :
830 0 : let (write_lsn, write_record_lsn, flush_lsn, file_open) =
831 0 : state.sk.wal_storage_internal_state();
832 0 :
833 0 : debug_dump::Memory {
834 0 : is_cancelled: self.is_cancelled(),
835 0 : peers_info_len: state.peers_info.0.len(),
836 0 : walsenders: self.walsenders.get_all_public(),
837 0 : wal_backup_active: self.wal_backup_active.load(Ordering::Relaxed),
838 0 : active: self.broker_active.load(Ordering::Relaxed),
839 0 : num_computes: self.walreceivers.get_num() as u32,
840 0 : last_removed_segno: self.last_removed_segno.load(Ordering::Relaxed),
841 0 : epoch_start_lsn: state.sk.term_start_lsn(),
842 0 : mem_state: state.sk.state().inmem.clone(),
843 0 : mgr_status: self.mgr_status.get(),
844 0 : write_lsn,
845 0 : write_record_lsn,
846 0 : flush_lsn,
847 0 : file_open,
848 0 : }
849 0 : }
850 :
851 : /// Apply a function to the control file state and persist it.
852 0 : pub async fn map_control_file<T>(
853 0 : self: &Arc<Self>,
854 0 : f: impl FnOnce(&mut TimelinePersistentState) -> Result<T>,
855 0 : ) -> Result<T> {
856 0 : let mut state = self.write_shared_state().await;
857 0 : let mut persistent_state = state.sk.state_mut().start_change();
858 : // If f returns error, we abort the change and don't persist anything.
859 0 : let res = f(&mut persistent_state)?;
860 : // If persisting fails, we abort the change and return error.
861 0 : state
862 0 : .sk
863 0 : .state_mut()
864 0 : .finish_change(&persistent_state)
865 0 : .await?;
866 0 : Ok(res)
867 0 : }
868 :
869 0 : pub async fn term_bump(self: &Arc<Self>, to: Option<Term>) -> Result<TimelineTermBumpResponse> {
870 0 : let mut state = self.write_shared_state().await;
871 0 : state.sk.term_bump(to).await
872 0 : }
873 :
874 0 : pub async fn membership_switch(
875 0 : self: &Arc<Self>,
876 0 : to: Configuration,
877 0 : ) -> Result<TimelineMembershipSwitchResponse> {
878 0 : let mut state = self.write_shared_state().await;
879 0 : state.sk.membership_switch(to).await
880 0 : }
881 :
882 : /// Guts of [`Self::wal_residence_guard`] and [`Self::try_wal_residence_guard`]
883 10 : async fn do_wal_residence_guard(
884 10 : self: &Arc<Self>,
885 10 : block: bool,
886 10 : ) -> Result<Option<WalResidentTimeline>> {
887 10 : let op_label = if block {
888 10 : "wal_residence_guard"
889 : } else {
890 0 : "try_wal_residence_guard"
891 : };
892 :
893 10 : if self.is_cancelled() {
894 0 : bail!(TimelineError::Cancelled(self.ttid));
895 10 : }
896 10 :
897 10 : debug!("requesting WalResidentTimeline guard");
898 10 : let started_at = Instant::now();
899 10 : let status_before = self.mgr_status.get();
900 :
901 : // Wait 30 seconds for the guard to be acquired. It can time out if someone is
902 : // holding the lock (e.g. during `SafeKeeper::process_msg()`) or manager task
903 : // is stuck.
904 10 : let res = tokio::time::timeout_at(started_at + Duration::from_secs(30), async {
905 10 : if block {
906 10 : self.manager_ctl.wal_residence_guard().await.map(Some)
907 : } else {
908 0 : self.manager_ctl.try_wal_residence_guard().await
909 : }
910 10 : })
911 10 : .await;
912 :
913 10 : let guard = match res {
914 10 : Ok(Ok(guard)) => {
915 10 : let finished_at = Instant::now();
916 10 : let elapsed = finished_at - started_at;
917 10 : MISC_OPERATION_SECONDS
918 10 : .with_label_values(&[op_label])
919 10 : .observe(elapsed.as_secs_f64());
920 10 :
921 10 : guard
922 : }
923 0 : Ok(Err(e)) => {
924 0 : warn!(
925 0 : "error acquiring in {op_label}, statuses {:?} => {:?}",
926 0 : status_before,
927 0 : self.mgr_status.get()
928 : );
929 0 : return Err(e);
930 : }
931 : Err(_) => {
932 0 : warn!(
933 0 : "timeout acquiring in {op_label} guard, statuses {:?} => {:?}",
934 0 : status_before,
935 0 : self.mgr_status.get()
936 : );
937 0 : anyhow::bail!("timeout while acquiring WalResidentTimeline guard");
938 : }
939 : };
940 :
941 10 : Ok(guard.map(|g| WalResidentTimeline::new(self.clone(), g)))
942 10 : }
943 :
944 : /// Get the timeline guard for reading/writing WAL files.
945 : /// If WAL files are not present on disk (evicted), they will be automatically
946 : /// downloaded from remote storage. This is done in the manager task, which is
947 : /// responsible for issuing all guards.
948 : ///
949 : /// NB: don't use this function from timeline_manager, it will deadlock.
950 : /// NB: don't use this function while holding shared_state lock.
951 10 : pub async fn wal_residence_guard(self: &Arc<Self>) -> Result<WalResidentTimeline> {
952 10 : self.do_wal_residence_guard(true)
953 10 : .await
954 10 : .map(|m| m.expect("Always get Some in block=true mode"))
955 10 : }
956 :
957 : /// Get the timeline guard for reading/writing WAL files if the timeline is resident,
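// A minimal sketch of acquiring residence before touching WAL (hypothetical
// caller code, not compiled here; `start_lsn` is an assumed local): the guard
// keeps the timeline resident while held, downloading WAL from remote storage
// first if the timeline was evicted.
//
//     let resident: WalResidentTimeline = tli.wal_residence_guard().await?;
//     let mut reader = resident.get_walreader(start_lsn).await?;
//     // ...read WAL through `reader` while `resident` is alive...
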
958 : /// else return None
959 0 : pub(crate) async fn try_wal_residence_guard(
960 0 : self: &Arc<Self>,
961 0 : ) -> Result<Option<WalResidentTimeline>> {
962 0 : self.do_wal_residence_guard(false).await
963 0 : }
964 :
965 0 : pub async fn backup_partial_reset(self: &Arc<Self>) -> Result<Vec<String>> {
966 0 : self.manager_ctl.backup_partial_reset().await
967 0 : }
968 : }
969 :
970 : /// This is a guard that allows reading/writing the on-disk timeline state.
971 : /// All tasks that read or write WAL on disk should use this guard.
972 : pub struct WalResidentTimeline {
973 : pub tli: Arc<Timeline>,
974 : _guard: ResidenceGuard,
975 : }
976 :
977 : impl WalResidentTimeline {
978 15 : pub fn new(tli: Arc<Timeline>, _guard: ResidenceGuard) -> Self {
979 15 : WalResidentTimeline { tli, _guard }
980 15 : }
981 : }
982 :
983 : impl Deref for WalResidentTimeline {
984 : type Target = Arc<Timeline>;
985 :
986 5662 : fn deref(&self) -> &Self::Target {
987 5662 : &self.tli
988 5662 : }
989 : }
990 :
991 : impl WalResidentTimeline {
992 : /// Returns true if the walsender should stop sending WAL to the pageserver. We
993 : /// terminate it if remote_consistent_lsn has reached commit_lsn and there are no
994 : /// computes. While there might already be nothing to stream, we learn about
995 : /// remote_consistent_lsn updates through replication feedback, and we want
996 : /// to stop pushing to the broker if the pageserver is fully caught up.
997 0 : pub async fn should_walsender_stop(&self, reported_remote_consistent_lsn: Lsn) -> bool {
998 0 : if self.is_cancelled() {
999 0 : return true;
1000 0 : }
1001 0 : let shared_state = self.read_shared_state().await;
1002 0 : if self.walreceivers.get_num() == 0 {
1003 0 : return shared_state.sk.state().inmem.commit_lsn == Lsn(0) || // no data at all yet
1004 0 : reported_remote_consistent_lsn >= shared_state.sk.state().inmem.commit_lsn;
1005 0 : }
1006 0 : false
1007 0 : }
1008 :
1009 : /// Ensure that current term is t, erroring otherwise, and lock the state.
1010 0 : pub async fn acquire_term(&self, t: Term) -> Result<ReadGuardSharedState> {
1011 0 : let ss = self.read_shared_state().await;
1012 0 : if ss.sk.state().acceptor_state.term != t {
1013 0 : bail!(
1014 0 : "failed to acquire term {}, current term {}",
1015 0 : t,
1016 0 : ss.sk.state().acceptor_state.term
1017 0 : );
1018 0 : }
1019 0 : Ok(ss)
1020 0 : }
1021 :
1022 : /// Pass arrived message to the safekeeper.
1023 1240 : pub async fn process_msg(
1024 1240 : &self,
1025 1240 : msg: &ProposerAcceptorMessage,
1026 1240 : ) -> Result<Option<AcceptorProposerMessage>> {
1027 1240 : if self.is_cancelled() {
1028 0 : bail!(TimelineError::Cancelled(self.ttid));
1029 1240 : }
1030 :
1031 : let mut rmsg: Option<AcceptorProposerMessage>;
1032 : {
1033 1240 : let mut shared_state = self.write_shared_state().await;
1034 1240 : rmsg = shared_state.sk.safekeeper().process_msg(msg).await?;
1035 :
1036 : // if this is AppendResponse, fill in proper hot standby feedback.
1037 620 : if let Some(AcceptorProposerMessage::AppendResponse(ref mut resp)) = rmsg {
1038 620 : resp.hs_feedback = self.walsenders.get_hotstandby().hs_feedback;
1039 620 : }
1040 : }
1041 1240 : Ok(rmsg)
1042 1240 : }
1043 :
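// A minimal sketch of the receive loop that feeds this method (hypothetical
// code, not compiled here; `next_proposer_message` and `send_to_walproposer`
// are assumed helpers): each ProposerAcceptorMessage is handed to
// `process_msg`, and any reply (e.g. an AppendResponse carrying hot-standby
// feedback) is sent back to the walproposer.
//
//     while let Some(msg) = next_proposer_message().await {
//         if let Some(reply) = resident_tli.process_msg(&msg).await? {
//             send_to_walproposer(reply).await?;
//         }
//     }
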
1044 8 : pub async fn get_walreader(&self, start_lsn: Lsn) -> Result<WalReader> {
1045 8 : let (_, persisted_state) = self.get_state().await;
1046 8 : let enable_remote_read = self.conf.is_wal_backup_enabled();
1047 8 :
1048 8 : WalReader::new(
1049 8 : &self.ttid,
1050 8 : self.timeline_dir.clone(),
1051 8 : &persisted_state,
1052 8 : start_lsn,
1053 8 : enable_remote_read,
1054 8 : )
1055 8 : }
1056 :
1057 0 : pub fn get_timeline_dir(&self) -> Utf8PathBuf {
1058 0 : self.timeline_dir.clone()
1059 0 : }
1060 :
1061 : /// Update in memory remote consistent lsn.
1062 0 : pub async fn update_remote_consistent_lsn(&self, candidate: Lsn) {
1063 0 : let mut shared_state = self.write_shared_state().await;
1064 0 : shared_state.sk.state_mut().inmem.remote_consistent_lsn = max(
1065 0 : shared_state.sk.state().inmem.remote_consistent_lsn,
1066 0 : candidate,
1067 0 : );
1068 0 : }
1069 : }
1070 :
1071 : /// This struct contains methods that are used by timeline manager task.
1072 : pub(crate) struct ManagerTimeline {
1073 : pub(crate) tli: Arc<Timeline>,
1074 : }
1075 :
1076 : impl Deref for ManagerTimeline {
1077 : type Target = Arc<Timeline>;
1078 :
1079 432 : fn deref(&self) -> &Self::Target {
1080 432 : &self.tli
1081 432 : }
1082 : }
1083 :
1084 : impl ManagerTimeline {
1085 0 : pub(crate) fn timeline_dir(&self) -> &Utf8PathBuf {
1086 0 : &self.tli.timeline_dir
1087 0 : }
1088 :
1089 : /// Manager requests this state on startup.
1090 5 : pub(crate) async fn bootstrap_mgr(&self) -> (bool, Option<PartialRemoteSegment>) {
1091 5 : let shared_state = self.read_shared_state().await;
1092 5 : let is_offloaded = matches!(
1093 5 : shared_state.sk.state().eviction_state,
1094 : EvictionState::Offloaded(_)
1095 : );
1096 5 : let partial_backup_uploaded = shared_state.sk.state().partial_backup.uploaded_segment();
1097 5 :
1098 5 : (is_offloaded, partial_backup_uploaded)
1099 5 : }
1100 :
1101 : /// Try to switch state Present->Offloaded.
1102 0 : pub(crate) async fn switch_to_offloaded(
1103 0 : &self,
1104 0 : partial: &PartialRemoteSegment,
1105 0 : ) -> anyhow::Result<()> {
1106 0 : let mut shared = self.write_shared_state().await;
1107 :
1108 : // updating control file
1109 0 : let mut pstate = shared.sk.state_mut().start_change();
1110 :
1111 0 : if !matches!(pstate.eviction_state, EvictionState::Present) {
1112 0 : bail!(
1113 0 : "cannot switch to offloaded state, current state is {:?}",
1114 0 : pstate.eviction_state
1115 0 : );
1116 0 : }
1117 0 :
1118 0 : if partial.flush_lsn != shared.sk.flush_lsn() {
1119 0 : bail!(
1120 0 : "flush_lsn mismatch in partial backup, expected {}, got {}",
1121 0 : shared.sk.flush_lsn(),
1122 0 : partial.flush_lsn
1123 0 : );
1124 0 : }
1125 0 :
1126 0 : if partial.commit_lsn != pstate.commit_lsn {
1127 0 : bail!(
1128 0 : "commit_lsn mismatch in partial backup, expected {}, got {}",
1129 0 : pstate.commit_lsn,
1130 0 : partial.commit_lsn
1131 0 : );
1132 0 : }
1133 0 :
1134 0 : if partial.term != shared.sk.last_log_term() {
1135 0 : bail!(
1136 0 : "term mismatch in partial backup, expected {}, got {}",
1137 0 : shared.sk.last_log_term(),
1138 0 : partial.term
1139 0 : );
1140 0 : }
1141 0 :
1142 0 : pstate.eviction_state = EvictionState::Offloaded(shared.sk.flush_lsn());
1143 0 : shared.sk.state_mut().finish_change(&pstate).await?;
1144 : // control file is now switched to Offloaded state
1145 :
1146 : // now we can switch shared.sk to Offloaded, shouldn't fail
1147 0 : let prev_sk = std::mem::replace(&mut shared.sk, StateSK::Empty);
1148 0 : let cfile_state = prev_sk.take_state();
1149 0 : shared.sk = StateSK::Offloaded(Box::new(cfile_state));
1150 0 :
1151 0 : Ok(())
1152 0 : }
1153 :
1154 : /// Try to switch state Offloaded->Present.
1155 0 : pub(crate) async fn switch_to_present(&self) -> anyhow::Result<()> {
1156 0 : let mut shared = self.write_shared_state().await;
1157 :
1158 : // trying to restore WAL storage
1159 0 : let wal_store = wal_storage::PhysicalStorage::new(
1160 0 : &self.ttid,
1161 0 : &self.timeline_dir,
1162 0 : shared.sk.state(),
1163 0 : self.conf.no_sync,
1164 0 : )?;
1165 :
1166 : // updating control file
1167 0 : let mut pstate = shared.sk.state_mut().start_change();
1168 :
1169 0 : if !matches!(pstate.eviction_state, EvictionState::Offloaded(_)) {
1170 0 : bail!(
1171 0 : "cannot switch to present state, current state is {:?}",
1172 0 : pstate.eviction_state
1173 0 : );
1174 0 : }
1175 0 :
1176 0 : if wal_store.flush_lsn() != shared.sk.flush_lsn() {
1177 0 : bail!(
1178 0 : "flush_lsn mismatch in restored WAL, expected {}, got {}",
1179 0 : shared.sk.flush_lsn(),
1180 0 : wal_store.flush_lsn()
1181 0 : );
1182 0 : }
1183 0 :
1184 0 : pstate.eviction_state = EvictionState::Present;
1185 0 : shared.sk.state_mut().finish_change(&pstate).await?;
1186 :
1187 : // now we can switch shared.sk to Present, shouldn't fail
1188 0 : let prev_sk = std::mem::replace(&mut shared.sk, StateSK::Empty);
1189 0 : let cfile_state = prev_sk.take_state();
1190 0 : shared.sk = StateSK::Loaded(SafeKeeper::new(cfile_state, wal_store, self.conf.my_id)?);
1191 :
1192 0 : Ok(())
1193 0 : }
1194 :
1195 : /// Update current manager state, useful for debugging manager deadlocks.
1196 228 : pub(crate) fn set_status(&self, status: timeline_manager::Status) {
1197 228 : self.mgr_status.store(status, Ordering::Relaxed);
1198 228 : }
1199 : }
1200 :
1201 : /// Deletes the directory and its contents. Returns false if the directory does not exist.
1202 0 : pub async fn delete_dir(path: &Utf8PathBuf) -> Result<bool> {
1203 0 : match fs::remove_dir_all(path).await {
1204 0 : Ok(_) => Ok(true),
1205 0 : Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(false),
1206 0 : Err(e) => Err(e.into()),
1207 : }
1208 0 : }
1209 :
1210 : /// Get a path to the tenant directory. If you just need to get a timeline directory,
1211 : /// use WalResidentTimeline::get_timeline_dir instead.
1212 10 : pub fn get_tenant_dir(conf: &SafeKeeperConf, tenant_id: &TenantId) -> Utf8PathBuf {
1213 10 : conf.workdir.join(tenant_id.to_string())
1214 10 : }
1215 :
1216 : /// Get a path to the timeline directory. If you need to read WAL files from disk,
1217 : /// use WalResidentTimeline::get_timeline_dir instead. This function does not check
1218 : /// timeline eviction status and WAL files might not be present on disk.
1219 10 : pub fn get_timeline_dir(conf: &SafeKeeperConf, ttid: &TenantTimelineId) -> Utf8PathBuf {
1220 10 : get_tenant_dir(conf, &ttid.tenant_id).join(ttid.timeline_id.to_string())
1221 10 : }
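// A minimal sketch of the resulting on-disk layout (illustrative placeholders
// only):
//
//     get_tenant_dir(conf, &tenant_id) == <conf.workdir>/<tenant_id>
//     get_timeline_dir(conf, &ttid)    == <conf.workdir>/<tenant_id>/<timeline_id>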