Line data Source code
1 : //! This module implements Timeline lifecycle management and contains all the code
2 : //! needed to glue SafeKeeper together with the other background services.
3 :
4 : use anyhow::{anyhow, bail, Result};
5 : use camino::Utf8PathBuf;
6 : use serde::{Deserialize, Serialize};
7 : use tokio::fs::{self};
8 : use tokio_util::sync::CancellationToken;
9 : use utils::id::TenantId;
10 :
11 : use std::cmp::max;
12 : use std::ops::{Deref, DerefMut};
13 : use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
14 : use std::sync::Arc;
15 : use std::time::Duration;
16 : use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};
17 : use tokio::{sync::watch, time::Instant};
18 : use tracing::*;
19 : use utils::http::error::ApiError;
20 : use utils::{
21 : id::{NodeId, TenantTimelineId},
22 : lsn::Lsn,
23 : };
24 :
25 : use storage_broker::proto::SafekeeperTimelineInfo;
26 : use storage_broker::proto::TenantTimelineId as ProtoTenantTimelineId;
27 :
28 : use crate::receive_wal::WalReceivers;
29 : use crate::safekeeper::{
30 : AcceptorProposerMessage, ProposerAcceptorMessage, SafeKeeper, ServerInfo, Term, TermLsn,
31 : INVALID_TERM,
32 : };
33 : use crate::send_wal::WalSenders;
34 : use crate::state::{EvictionState, TimelineMemState, TimelinePersistentState, TimelineState};
35 : use crate::timeline_guard::ResidenceGuard;
36 : use crate::timeline_manager::{AtomicStatus, ManagerCtl};
37 : use crate::timelines_set::TimelinesSet;
38 : use crate::wal_backup::{self};
39 : use crate::wal_backup_partial::PartialRemoteSegment;
40 : use crate::{control_file, safekeeper::UNKNOWN_SERVER_VERSION};
41 :
42 : use crate::metrics::{FullTimelineInfo, WalStorageMetrics};
43 : use crate::wal_storage::{Storage as wal_storage_iface, WalReader};
44 : use crate::{debug_dump, timeline_manager, wal_storage};
45 : use crate::{GlobalTimelines, SafeKeeperConf};
46 :
47 : /// Things safekeeper should know about timeline state on peers.
48 0 : #[derive(Debug, Clone, Serialize, Deserialize)]
49 : pub struct PeerInfo {
50 : pub sk_id: NodeId,
51 : pub term: Term,
52 : /// Term of the last entry.
53 : pub last_log_term: Term,
54 : /// LSN of the last record.
55 : pub flush_lsn: Lsn,
56 : pub commit_lsn: Lsn,
57 : /// The LSN since which this safekeeper has WAL.
58 : pub local_start_lsn: Lsn,
59 : /// When info was received. Serde annotations are not very useful but make
60 : /// the code compile -- we don't rely on this field externally.
61 : #[serde(skip)]
62 : #[serde(default = "Instant::now")]
63 : ts: Instant,
64 : pub pg_connstr: String,
65 : pub http_connstr: String,
66 : }
67 :
68 : impl PeerInfo {
69 0 : fn from_sk_info(sk_info: &SafekeeperTimelineInfo, ts: Instant) -> PeerInfo {
70 0 : PeerInfo {
71 0 : sk_id: NodeId(sk_info.safekeeper_id),
72 0 : term: sk_info.term,
73 0 : last_log_term: sk_info.last_log_term,
74 0 : flush_lsn: Lsn(sk_info.flush_lsn),
75 0 : commit_lsn: Lsn(sk_info.commit_lsn),
76 0 : local_start_lsn: Lsn(sk_info.local_start_lsn),
77 0 : pg_connstr: sk_info.safekeeper_connstr.clone(),
78 0 : http_connstr: sk_info.http_connstr.clone(),
79 0 : ts,
80 0 : }
81 0 : }
82 : }
83 :
84 : // Vector-based node id -> peer state map with only the very limited
85 : // functionality we need.
86 : #[derive(Debug, Clone, Default)]
87 : pub struct PeersInfo(pub Vec<PeerInfo>);
88 :
89 : impl PeersInfo {
90 0 : fn get(&mut self, id: NodeId) -> Option<&mut PeerInfo> {
91 0 : self.0.iter_mut().find(|p| p.sk_id == id)
92 0 : }
93 :
94 0 : fn upsert(&mut self, p: &PeerInfo) {
95 0 : match self.get(p.sk_id) {
96 0 : Some(rp) => *rp = p.clone(),
97 0 : None => self.0.push(p.clone()),
98 : }
99 0 : }
100 : }
101 :
102 : pub type ReadGuardSharedState<'a> = RwLockReadGuard<'a, SharedState>;
103 :
104 : /// WriteGuardSharedState is a wrapper around `RwLockWriteGuard<SharedState>` that
105 : /// automatically updates `watch::Sender` channels with state on drop.
106 : pub struct WriteGuardSharedState<'a> {
107 : tli: Arc<Timeline>,
108 : guard: RwLockWriteGuard<'a, SharedState>,
109 : skip_update: bool,
110 : }
111 :
112 : impl<'a> WriteGuardSharedState<'a> {
113 0 : fn new(tli: Arc<Timeline>, guard: RwLockWriteGuard<'a, SharedState>) -> Self {
114 0 : WriteGuardSharedState {
115 0 : tli,
116 0 : guard,
117 0 : skip_update: false,
118 0 : }
119 0 : }
120 : }
121 :
122 : impl<'a> Deref for WriteGuardSharedState<'a> {
123 : type Target = SharedState;
124 :
125 0 : fn deref(&self) -> &Self::Target {
126 0 : &self.guard
127 0 : }
128 : }
129 :
130 : impl<'a> DerefMut for WriteGuardSharedState<'a> {
131 0 : fn deref_mut(&mut self) -> &mut Self::Target {
132 0 : &mut self.guard
133 0 : }
134 : }
135 :
136 : impl<'a> Drop for WriteGuardSharedState<'a> {
137 0 : fn drop(&mut self) {
138 0 : let term_flush_lsn =
139 0 : TermLsn::from((self.guard.sk.last_log_term(), self.guard.sk.flush_lsn()));
140 0 : let commit_lsn = self.guard.sk.state().inmem.commit_lsn;
141 0 :
142 0 : let _ = self.tli.term_flush_lsn_watch_tx.send_if_modified(|old| {
143 0 : if *old != term_flush_lsn {
144 0 : *old = term_flush_lsn;
145 0 : true
146 : } else {
147 0 : false
148 : }
149 0 : });
150 0 :
151 0 : let _ = self.tli.commit_lsn_watch_tx.send_if_modified(|old| {
152 0 : if *old != commit_lsn {
153 0 : *old = commit_lsn;
154 0 : true
155 : } else {
156 0 : false
157 : }
158 0 : });
159 0 :
160 0 : if !self.skip_update {
161 0 : // send notification about shared state update
162 0 : self.tli.shared_state_version_tx.send_modify(|old| {
163 0 : *old += 1;
164 0 : });
165 0 : }
166 0 : }
167 : }
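// A minimal usage sketch (not part of this module's API): the watch channels
// bumped by the Drop impl above are how background tasks observe progress.
// `tli` is assumed to be an `Arc<Timeline>` obtained from the global map.
//
//     let mut commit_lsn_rx = tli.get_commit_lsn_watch_rx();
//     loop {
//         commit_lsn_rx.changed().await?;     // woken after a write guard is dropped
//         let lsn = *commit_lsn_rx.borrow();  // latest value; intermediate updates may be skipped
//         // react to the new commit_lsn here
//     }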
168 :
169 : /// This structure is stored in shared state and represents the state of the timeline.
170 : /// Usually it holds SafeKeeper, but it also supports offloaded timeline state. In this
171 : /// case, SafeKeeper is not available (because WAL is not present on disk) and all
172 : /// operations can be done only with control file.
173 : pub enum StateSK {
174 : Loaded(SafeKeeper<control_file::FileStorage, wal_storage::PhysicalStorage>),
175 : Offloaded(Box<TimelineState<control_file::FileStorage>>),
176 : // Not used, required for moving between states.
177 : Empty,
178 : }
179 :
180 : impl StateSK {
181 0 : pub fn flush_lsn(&self) -> Lsn {
182 0 : match self {
183 0 : StateSK::Loaded(sk) => sk.wal_store.flush_lsn(),
184 0 : StateSK::Offloaded(state) => match state.eviction_state {
185 0 : EvictionState::Offloaded(flush_lsn) => flush_lsn,
186 0 : _ => panic!("StateSK::Offloaded does not match eviction_state from control_file"),
187 : },
188 0 : StateSK::Empty => unreachable!(),
189 : }
190 0 : }
191 :
192 : /// Get a reference to the control file's timeline state.
193 0 : pub fn state(&self) -> &TimelineState<control_file::FileStorage> {
194 0 : match self {
195 0 : StateSK::Loaded(sk) => &sk.state,
196 0 : StateSK::Offloaded(ref s) => s,
197 0 : StateSK::Empty => unreachable!(),
198 : }
199 0 : }
200 :
201 0 : pub fn state_mut(&mut self) -> &mut TimelineState<control_file::FileStorage> {
202 0 : match self {
203 0 : StateSK::Loaded(sk) => &mut sk.state,
204 0 : StateSK::Offloaded(ref mut s) => s,
205 0 : StateSK::Empty => unreachable!(),
206 : }
207 0 : }
208 :
209 0 : pub fn last_log_term(&self) -> Term {
210 0 : self.state()
211 0 : .acceptor_state
212 0 : .get_last_log_term(self.flush_lsn())
213 0 : }
214 :
215 : /// Close open WAL files to release FDs.
216 0 : fn close_wal_store(&mut self) {
217 0 : if let StateSK::Loaded(sk) = self {
218 0 : sk.wal_store.close();
219 0 : }
220 0 : }
221 :
222 : /// Update timeline state with peer safekeeper data.
223 0 : pub async fn record_safekeeper_info(&mut self, sk_info: &SafekeeperTimelineInfo) -> Result<()> {
224 0 : // update commit_lsn if safekeeper is loaded
225 0 : match self {
226 0 : StateSK::Loaded(sk) => sk.record_safekeeper_info(sk_info).await?,
227 0 : StateSK::Offloaded(_) => {}
228 0 : StateSK::Empty => unreachable!(),
229 : }
230 :
231 : // update everything else, including remote_consistent_lsn and backup_lsn
232 0 : let mut sync_control_file = false;
233 0 : let state = self.state_mut();
234 0 : let wal_seg_size = state.server.wal_seg_size as u64;
235 0 :
236 0 : state.inmem.backup_lsn = max(Lsn(sk_info.backup_lsn), state.inmem.backup_lsn);
237 0 : sync_control_file |= state.backup_lsn + wal_seg_size < state.inmem.backup_lsn;
238 0 :
239 0 : state.inmem.remote_consistent_lsn = max(
240 0 : Lsn(sk_info.remote_consistent_lsn),
241 0 : state.inmem.remote_consistent_lsn,
242 0 : );
243 0 : sync_control_file |=
244 0 : state.remote_consistent_lsn + wal_seg_size < state.inmem.remote_consistent_lsn;
245 0 :
246 0 : state.inmem.peer_horizon_lsn =
247 0 : max(Lsn(sk_info.peer_horizon_lsn), state.inmem.peer_horizon_lsn);
248 0 : sync_control_file |= state.peer_horizon_lsn + wal_seg_size < state.inmem.peer_horizon_lsn;
249 0 :
250 0 : if sync_control_file {
251 0 : state.flush().await?;
252 0 : }
253 0 : Ok(())
254 0 : }
255 :
256 : /// Previously known as epoch_start_lsn. Needed only for reference in some APIs.
257 0 : pub fn term_start_lsn(&self) -> Lsn {
258 0 : match self {
259 0 : StateSK::Loaded(sk) => sk.term_start_lsn,
260 0 : StateSK::Offloaded(_) => Lsn(0),
261 0 : StateSK::Empty => unreachable!(),
262 : }
263 0 : }
264 :
265 : /// Used for metrics only.
266 0 : pub fn wal_storage_metrics(&self) -> WalStorageMetrics {
267 0 : match self {
268 0 : StateSK::Loaded(sk) => sk.wal_store.get_metrics(),
269 0 : StateSK::Offloaded(_) => WalStorageMetrics::default(),
270 0 : StateSK::Empty => unreachable!(),
271 : }
272 0 : }
273 :
274 : /// Returns WAL storage internal LSNs for debug dump.
275 0 : pub fn wal_storage_internal_state(&self) -> (Lsn, Lsn, Lsn, bool) {
276 0 : match self {
277 0 : StateSK::Loaded(sk) => sk.wal_store.internal_state(),
278 : StateSK::Offloaded(_) => {
279 0 : let flush_lsn = self.flush_lsn();
280 0 : (flush_lsn, flush_lsn, flush_lsn, false)
281 : }
282 0 : StateSK::Empty => unreachable!(),
283 : }
284 0 : }
285 :
286 : /// Access to the SafeKeeper object. Panics if the timeline is offloaded; safe to use from WalResidentTimeline.
287 0 : pub fn safekeeper(
288 0 : &mut self,
289 0 : ) -> &mut SafeKeeper<control_file::FileStorage, wal_storage::PhysicalStorage> {
290 0 : match self {
291 0 : StateSK::Loaded(sk) => sk,
292 : StateSK::Offloaded(_) => {
293 0 : panic!("safekeeper is offloaded, cannot be used")
294 : }
295 0 : StateSK::Empty => unreachable!(),
296 : }
297 0 : }
298 :
299 : /// Moves control file's state structure out of the enum. Used to switch states.
300 0 : fn take_state(self) -> TimelineState<control_file::FileStorage> {
301 0 : match self {
302 0 : StateSK::Loaded(sk) => sk.state,
303 0 : StateSK::Offloaded(state) => *state,
304 0 : StateSK::Empty => unreachable!(),
305 : }
306 0 : }
307 : }
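// Hedged illustration of why StateSK exposes LSNs in both states: crate-internal
// callers can query a timeline uniformly whether or not its WAL is resident on disk.
//
//     let shared = tli.read_shared_state().await;
//     let flush_lsn = shared.sk.flush_lsn();               // works for Loaded and Offloaded
//     let commit_lsn = shared.sk.state().inmem.commit_lsn; // from the control-file state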
308 :
309 : /// Shared state associated with a database instance
310 : pub struct SharedState {
311 : /// Safekeeper object
312 : pub(crate) sk: StateSK,
313 : /// In memory list containing state of peers sent in latest messages from them.
314 : pub(crate) peers_info: PeersInfo,
315 : // A true value prevents old WAL removal; this is used by snapshotting. We
316 : // could make it a counter, but there is no need to.
317 : pub(crate) wal_removal_on_hold: bool,
318 : }
319 :
320 : impl SharedState {
321 : /// Initialize fresh timeline state without persisting anything to disk.
322 0 : fn create_new(
323 0 : conf: &SafeKeeperConf,
324 0 : ttid: &TenantTimelineId,
325 0 : state: TimelinePersistentState,
326 0 : ) -> Result<Self> {
327 0 : if state.server.wal_seg_size == 0 {
328 0 : bail!(TimelineError::UninitializedWalSegSize(*ttid));
329 0 : }
330 0 :
331 0 : if state.server.pg_version == UNKNOWN_SERVER_VERSION {
332 0 : bail!(TimelineError::UninitializedPgVersion(*ttid));
333 0 : }
334 0 :
335 0 : if state.commit_lsn < state.local_start_lsn {
336 0 : bail!(
337 0 : "commit_lsn {} is higher than local_start_lsn {}",
338 0 : state.commit_lsn,
339 0 : state.local_start_lsn
340 0 : );
341 0 : }
342 0 :
343 0 : // We don't want to write anything to disk, because we may have existing timeline there.
344 0 : // These functions should not change anything on disk.
345 0 : let timeline_dir = get_timeline_dir(conf, ttid);
346 0 : let control_store =
347 0 : control_file::FileStorage::create_new(timeline_dir.clone(), conf, state)?;
348 0 : let wal_store =
349 0 : wal_storage::PhysicalStorage::new(ttid, timeline_dir, conf, &control_store)?;
350 0 : let sk = SafeKeeper::new(TimelineState::new(control_store), wal_store, conf.my_id)?;
351 :
352 0 : Ok(Self {
353 0 : sk: StateSK::Loaded(sk),
354 0 : peers_info: PeersInfo(vec![]),
355 0 : wal_removal_on_hold: false,
356 0 : })
357 0 : }
358 :
359 : /// Restore SharedState from control file. If file doesn't exist, bails out.
360 0 : fn restore(conf: &SafeKeeperConf, ttid: &TenantTimelineId) -> Result<Self> {
361 0 : let timeline_dir = get_timeline_dir(conf, ttid);
362 0 : let control_store = control_file::FileStorage::restore_new(ttid, conf)?;
363 0 : if control_store.server.wal_seg_size == 0 {
364 0 : bail!(TimelineError::UninitializedWalSegSize(*ttid));
365 0 : }
366 :
367 0 : let sk = match control_store.eviction_state {
368 : EvictionState::Present => {
369 0 : let wal_store =
370 0 : wal_storage::PhysicalStorage::new(ttid, timeline_dir, conf, &control_store)?;
371 0 : StateSK::Loaded(SafeKeeper::new(
372 0 : TimelineState::new(control_store),
373 0 : wal_store,
374 0 : conf.my_id,
375 0 : )?)
376 : }
377 : EvictionState::Offloaded(_) => {
378 0 : StateSK::Offloaded(Box::new(TimelineState::new(control_store)))
379 : }
380 : };
381 :
382 0 : Ok(Self {
383 0 : sk,
384 0 : peers_info: PeersInfo(vec![]),
385 0 : wal_removal_on_hold: false,
386 0 : })
387 0 : }
388 :
389 0 : pub(crate) fn get_wal_seg_size(&self) -> usize {
390 0 : self.sk.state().server.wal_seg_size as usize
391 0 : }
392 :
393 0 : fn get_safekeeper_info(
394 0 : &self,
395 0 : ttid: &TenantTimelineId,
396 0 : conf: &SafeKeeperConf,
397 0 : standby_apply_lsn: Lsn,
398 0 : ) -> SafekeeperTimelineInfo {
399 0 : SafekeeperTimelineInfo {
400 0 : safekeeper_id: conf.my_id.0,
401 0 : tenant_timeline_id: Some(ProtoTenantTimelineId {
402 0 : tenant_id: ttid.tenant_id.as_ref().to_owned(),
403 0 : timeline_id: ttid.timeline_id.as_ref().to_owned(),
404 0 : }),
405 0 : term: self.sk.state().acceptor_state.term,
406 0 : last_log_term: self.sk.last_log_term(),
407 0 : flush_lsn: self.sk.flush_lsn().0,
408 0 : // note: this value is not flushed to control file yet and can be lost
409 0 : commit_lsn: self.sk.state().inmem.commit_lsn.0,
410 0 : remote_consistent_lsn: self.sk.state().inmem.remote_consistent_lsn.0,
411 0 : peer_horizon_lsn: self.sk.state().inmem.peer_horizon_lsn.0,
412 0 : safekeeper_connstr: conf
413 0 : .advertise_pg_addr
414 0 : .to_owned()
415 0 : .unwrap_or(conf.listen_pg_addr.clone()),
416 0 : http_connstr: conf.listen_http_addr.to_owned(),
417 0 : backup_lsn: self.sk.state().inmem.backup_lsn.0,
418 0 : local_start_lsn: self.sk.state().local_start_lsn.0,
419 0 : availability_zone: conf.availability_zone.clone(),
420 0 : standby_horizon: standby_apply_lsn.0,
421 0 : }
422 0 : }
423 :
424 : /// Get our latest view of the status of alive peers on this timeline.
425 : /// We pass our own info through the broker as well, so when we don't have a connection
426 : /// to the broker the returned vec is empty.
427 0 : pub(crate) fn get_peers(&self, heartbeat_timeout: Duration) -> Vec<PeerInfo> {
428 0 : let now = Instant::now();
429 0 : self.peers_info
430 0 : .0
431 0 : .iter()
432 0 : // Regard peer as absent if we haven't heard from it within heartbeat_timeout.
433 0 : .filter(|p| now.duration_since(p.ts) <= heartbeat_timeout)
434 0 : .cloned()
435 0 : .collect()
436 0 : }
437 : }
438 :
439 0 : #[derive(Debug, thiserror::Error)]
440 : pub enum TimelineError {
441 : #[error("Timeline {0} was cancelled and cannot be used anymore")]
442 : Cancelled(TenantTimelineId),
443 : #[error("Timeline {0} was not found in global map")]
444 : NotFound(TenantTimelineId),
445 : #[error("Timeline {0} exists on disk, but wasn't loaded on startup")]
446 : Invalid(TenantTimelineId),
447 : #[error("Timeline {0} is already exists")]
448 : AlreadyExists(TenantTimelineId),
449 : #[error("Timeline {0} is not initialized, wal_seg_size is zero")]
450 : UninitializedWalSegSize(TenantTimelineId),
451 : #[error("Timeline {0} is not initialized, pg_version is unknown")]
452 : UninitializedPgVersion(TenantTimelineId),
453 : }
454 :
455 : // Convert to HTTP API error.
456 : impl From<TimelineError> for ApiError {
457 0 : fn from(te: TimelineError) -> ApiError {
458 0 : match te {
459 0 : TimelineError::NotFound(ttid) => {
460 0 : ApiError::NotFound(anyhow!("timeline {} not found", ttid).into())
461 : }
462 0 : _ => ApiError::InternalServerError(anyhow!("{}", te)),
463 : }
464 0 : }
465 : }
466 :
467 : /// Timeline struct manages lifecycle (creation, deletion, restore) of a safekeeper timeline.
468 : /// It also holds SharedState and provides mutually exclusive access to it.
469 : pub struct Timeline {
470 : pub ttid: TenantTimelineId,
471 :
472 : /// Used to broadcast commit_lsn updates to all background jobs.
473 : commit_lsn_watch_tx: watch::Sender<Lsn>,
474 : commit_lsn_watch_rx: watch::Receiver<Lsn>,
475 :
476 : /// Broadcasts (current term, flush_lsn) updates, walsender is interested in
477 : /// them when sending in recovery mode (to walproposer or peers). Note: this
478 : /// is just a notification; WAL reading should always be done with the lock held, as
479 : /// the term can change otherwise.
480 : term_flush_lsn_watch_tx: watch::Sender<TermLsn>,
481 : term_flush_lsn_watch_rx: watch::Receiver<TermLsn>,
482 :
483 : /// Broadcasts shared state updates.
484 : shared_state_version_tx: watch::Sender<usize>,
485 : shared_state_version_rx: watch::Receiver<usize>,
486 :
487 : /// Safekeeper and other state that should remain consistent and
488 : /// synchronized with the disk. This is a tokio lock, as we write WAL to disk
489 : /// while holding it, ensuring that consensus checks are in order.
490 : mutex: RwLock<SharedState>,
491 : walsenders: Arc<WalSenders>,
492 : walreceivers: Arc<WalReceivers>,
493 : timeline_dir: Utf8PathBuf,
494 : manager_ctl: ManagerCtl,
495 :
496 : /// Delete/cancel will trigger this; background tasks should drop out as soon as it fires.
497 : pub(crate) cancel: CancellationToken,
498 :
499 : // timeline_manager controlled state
500 : pub(crate) broker_active: AtomicBool,
501 : pub(crate) wal_backup_active: AtomicBool,
502 : pub(crate) last_removed_segno: AtomicU64,
503 : pub(crate) mgr_status: AtomicStatus,
504 : }
505 :
506 : impl Timeline {
507 : /// Load existing timeline from disk.
508 0 : pub fn load_timeline(conf: &SafeKeeperConf, ttid: TenantTimelineId) -> Result<Timeline> {
509 0 : let _enter = info_span!("load_timeline", timeline = %ttid.timeline_id).entered();
510 :
511 0 : let shared_state = SharedState::restore(conf, &ttid)?;
512 0 : let (commit_lsn_watch_tx, commit_lsn_watch_rx) =
513 0 : watch::channel(shared_state.sk.state().commit_lsn);
514 0 : let (term_flush_lsn_watch_tx, term_flush_lsn_watch_rx) = watch::channel(TermLsn::from((
515 0 : shared_state.sk.last_log_term(),
516 0 : shared_state.sk.flush_lsn(),
517 0 : )));
518 0 : let (shared_state_version_tx, shared_state_version_rx) = watch::channel(0);
519 0 :
520 0 : let walreceivers = WalReceivers::new();
521 0 : Ok(Timeline {
522 0 : ttid,
523 0 : commit_lsn_watch_tx,
524 0 : commit_lsn_watch_rx,
525 0 : term_flush_lsn_watch_tx,
526 0 : term_flush_lsn_watch_rx,
527 0 : shared_state_version_tx,
528 0 : shared_state_version_rx,
529 0 : mutex: RwLock::new(shared_state),
530 0 : walsenders: WalSenders::new(walreceivers.clone()),
531 0 : walreceivers,
532 0 : cancel: CancellationToken::default(),
533 0 : timeline_dir: get_timeline_dir(conf, &ttid),
534 0 : manager_ctl: ManagerCtl::new(),
535 0 : broker_active: AtomicBool::new(false),
536 0 : wal_backup_active: AtomicBool::new(false),
537 0 : last_removed_segno: AtomicU64::new(0),
538 0 : mgr_status: AtomicStatus::new(),
539 0 : })
540 0 : }
541 :
542 : /// Create a new timeline, which is not yet persisted to disk.
543 0 : pub fn create_empty(
544 0 : conf: &SafeKeeperConf,
545 0 : ttid: TenantTimelineId,
546 0 : server_info: ServerInfo,
547 0 : commit_lsn: Lsn,
548 0 : local_start_lsn: Lsn,
549 0 : ) -> Result<Timeline> {
550 0 : let (commit_lsn_watch_tx, commit_lsn_watch_rx) = watch::channel(Lsn::INVALID);
551 0 : let (term_flush_lsn_watch_tx, term_flush_lsn_watch_rx) =
552 0 : watch::channel(TermLsn::from((INVALID_TERM, Lsn::INVALID)));
553 0 : let (shared_state_version_tx, shared_state_version_rx) = watch::channel(0);
554 0 :
555 0 : let state =
556 0 : TimelinePersistentState::new(&ttid, server_info, vec![], commit_lsn, local_start_lsn);
557 0 :
558 0 : let walreceivers = WalReceivers::new();
559 0 : Ok(Timeline {
560 0 : ttid,
561 0 : commit_lsn_watch_tx,
562 0 : commit_lsn_watch_rx,
563 0 : term_flush_lsn_watch_tx,
564 0 : term_flush_lsn_watch_rx,
565 0 : shared_state_version_tx,
566 0 : shared_state_version_rx,
567 0 : mutex: RwLock::new(SharedState::create_new(conf, &ttid, state)?),
568 0 : walsenders: WalSenders::new(walreceivers.clone()),
569 0 : walreceivers,
570 0 : cancel: CancellationToken::default(),
571 0 : timeline_dir: get_timeline_dir(conf, &ttid),
572 0 : manager_ctl: ManagerCtl::new(),
573 0 : broker_active: AtomicBool::new(false),
574 0 : wal_backup_active: AtomicBool::new(false),
575 0 : last_removed_segno: AtomicU64::new(0),
576 0 : mgr_status: AtomicStatus::new(),
577 : })
578 0 : }
579 :
580 : /// Initialize fresh timeline on disk and start background tasks. If init
581 : /// fails, timeline is cancelled and cannot be used anymore.
582 : ///
583 : /// Init is transactional, so if it fails, created files will be deleted,
584 : /// and state on disk should remain unchanged.
585 0 : pub async fn init_new(
586 0 : self: &Arc<Timeline>,
587 0 : shared_state: &mut WriteGuardSharedState<'_>,
588 0 : conf: &SafeKeeperConf,
589 0 : broker_active_set: Arc<TimelinesSet>,
590 0 : ) -> Result<()> {
591 0 : match fs::metadata(&self.timeline_dir).await {
592 : Ok(_) => {
593 : // Timeline directory exists on disk, we should leave state unchanged
594 : // and return error.
595 0 : bail!(TimelineError::Invalid(self.ttid));
596 : }
597 0 : Err(e) if e.kind() == std::io::ErrorKind::NotFound => {}
598 0 : Err(e) => {
599 0 : return Err(e.into());
600 : }
601 : }
602 :
603 : // Create timeline directory.
604 0 : fs::create_dir_all(&self.timeline_dir).await?;
605 :
606 : // Write timeline to disk and start background tasks.
607 0 : if let Err(e) = shared_state.sk.state_mut().flush().await {
608 : // Bootstrap failed, cancel timeline and remove timeline directory.
609 0 : self.cancel(shared_state);
610 :
611 0 : if let Err(fs_err) = fs::remove_dir_all(&self.timeline_dir).await {
612 0 : warn!(
613 0 : "failed to remove timeline {} directory after bootstrap failure: {}",
614 0 : self.ttid, fs_err
615 : );
616 0 : }
617 :
618 0 : return Err(e);
619 0 : }
620 0 : self.bootstrap(conf, broker_active_set);
621 0 : Ok(())
622 0 : }
623 :
624 : /// Bootstrap new or existing timeline starting background tasks.
625 0 : pub fn bootstrap(
626 0 : self: &Arc<Timeline>,
627 0 : conf: &SafeKeeperConf,
628 0 : broker_active_set: Arc<TimelinesSet>,
629 0 : ) {
630 0 : let (tx, rx) = self.manager_ctl.bootstrap_manager();
631 0 :
632 0 : // Start manager task which will monitor timeline state and update
633 0 : // background tasks.
634 0 : tokio::spawn(timeline_manager::main_task(
635 0 : ManagerTimeline { tli: self.clone() },
636 0 : conf.clone(),
637 0 : broker_active_set,
638 0 : tx,
639 0 : rx,
640 0 : ));
641 0 : }
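// Hedged sketch of the load path, normally driven by the global timelines map
// rather than called directly (`conf`, `ttid` and `broker_active_set` are
// assumed to be in scope):
//
//     let tli = Arc::new(Timeline::load_timeline(&conf, ttid)?);
//     tli.bootstrap(&conf, broker_active_set.clone());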
642 :
643 : /// Delete timeline from disk completely, by removing timeline directory.
644 : /// Background timeline activities will stop eventually.
645 : ///
646 : /// Also deletes WAL in s3. Might fail if e.g. s3 is unavailable, but
647 : /// deletion API endpoint is retriable.
648 0 : pub async fn delete(
649 0 : &self,
650 0 : shared_state: &mut WriteGuardSharedState<'_>,
651 0 : only_local: bool,
652 0 : ) -> Result<bool> {
653 0 : self.cancel(shared_state);
654 0 :
655 0 : // TODO: It's better to wait for s3 offloader termination before
656 0 : // removing data from s3. Though since s3 doesn't have transactions it
657 0 : // still wouldn't guarantee absence of data after removal.
658 0 : let conf = GlobalTimelines::get_global_config();
659 0 : if !only_local && conf.is_wal_backup_enabled() {
660 : // Note: we concurrently delete remote storage data from multiple
661 : // safekeepers. That's ok, s3 replies 200 if object doesn't exist and we
662 : // do some retries anyway.
663 0 : wal_backup::delete_timeline(&self.ttid).await?;
664 0 : }
665 0 : let dir_existed = delete_dir(&self.timeline_dir).await?;
666 0 : Ok(dir_existed)
667 0 : }
668 :
669 : /// Cancel timeline to prevent further usage. Background tasks will stop
670 : /// eventually after receiving cancellation signal.
671 0 : fn cancel(&self, shared_state: &mut WriteGuardSharedState<'_>) {
672 0 : info!("timeline {} is cancelled", self.ttid);
673 0 : self.cancel.cancel();
674 0 : // Close associated FDs. Nobody will be able to touch timeline data once
675 0 : // it is cancelled, so WAL storage won't be opened again.
676 0 : shared_state.sk.close_wal_store();
677 0 : }
678 :
679 : /// Returns if timeline is cancelled.
680 0 : pub fn is_cancelled(&self) -> bool {
681 0 : self.cancel.is_cancelled()
682 0 : }
683 :
684 : /// Take a write (mutually exclusive) lock on the timeline's shared_state.
685 0 : pub async fn write_shared_state<'a>(self: &'a Arc<Self>) -> WriteGuardSharedState<'a> {
686 0 : WriteGuardSharedState::new(self.clone(), self.mutex.write().await)
687 0 : }
688 :
689 0 : pub async fn read_shared_state(&self) -> ReadGuardSharedState {
690 0 : self.mutex.read().await
691 0 : }
692 :
693 : /// Returns commit_lsn watch channel.
694 0 : pub fn get_commit_lsn_watch_rx(&self) -> watch::Receiver<Lsn> {
695 0 : self.commit_lsn_watch_rx.clone()
696 0 : }
697 :
698 : /// Returns term_flush_lsn watch channel.
699 0 : pub fn get_term_flush_lsn_watch_rx(&self) -> watch::Receiver<TermLsn> {
700 0 : self.term_flush_lsn_watch_rx.clone()
701 0 : }
702 :
703 : /// Returns watch channel for SharedState update version.
704 0 : pub fn get_state_version_rx(&self) -> watch::Receiver<usize> {
705 0 : self.shared_state_version_rx.clone()
706 0 : }
707 :
708 : /// Returns wal_seg_size.
709 0 : pub async fn get_wal_seg_size(&self) -> usize {
710 0 : self.read_shared_state().await.get_wal_seg_size()
711 0 : }
712 :
713 : /// Returns state of the timeline.
714 0 : pub async fn get_state(&self) -> (TimelineMemState, TimelinePersistentState) {
715 0 : let state = self.read_shared_state().await;
716 0 : (
717 0 : state.sk.state().inmem.clone(),
718 0 : TimelinePersistentState::clone(state.sk.state()),
719 0 : )
720 0 : }
721 :
722 : /// Returns latest backup_lsn.
723 0 : pub async fn get_wal_backup_lsn(&self) -> Lsn {
724 0 : self.read_shared_state().await.sk.state().inmem.backup_lsn
725 0 : }
726 :
727 : /// Sets backup_lsn to the given value.
728 0 : pub async fn set_wal_backup_lsn(self: &Arc<Self>, backup_lsn: Lsn) -> Result<()> {
729 0 : if self.is_cancelled() {
730 0 : bail!(TimelineError::Cancelled(self.ttid));
731 0 : }
732 :
733 0 : let mut state = self.write_shared_state().await;
734 0 : state.sk.state_mut().inmem.backup_lsn = max(state.sk.state().inmem.backup_lsn, backup_lsn);
735 0 : // we should check whether to shut down offloader, but this will be done
736 0 : // soon by peer communication anyway.
737 0 : Ok(())
738 0 : }
739 :
740 : /// Get safekeeper info for broadcasting to broker and other peers.
741 0 : pub async fn get_safekeeper_info(&self, conf: &SafeKeeperConf) -> SafekeeperTimelineInfo {
742 0 : let standby_apply_lsn = self.walsenders.get_hotstandby().reply.apply_lsn;
743 0 : let shared_state = self.read_shared_state().await;
744 0 : shared_state.get_safekeeper_info(&self.ttid, conf, standby_apply_lsn)
745 0 : }
746 :
747 : /// Update timeline state with peer safekeeper data.
748 0 : pub async fn record_safekeeper_info(
749 0 : self: &Arc<Self>,
750 0 : sk_info: SafekeeperTimelineInfo,
751 0 : ) -> Result<()> {
752 : {
753 0 : let mut shared_state = self.write_shared_state().await;
754 0 : shared_state.sk.record_safekeeper_info(&sk_info).await?;
755 0 : let peer_info = PeerInfo::from_sk_info(&sk_info, Instant::now());
756 0 : shared_state.peers_info.upsert(&peer_info);
757 0 : }
758 0 : Ok(())
759 0 : }
760 :
761 0 : pub async fn get_peers(&self, conf: &SafeKeeperConf) -> Vec<PeerInfo> {
762 0 : let shared_state = self.read_shared_state().await;
763 0 : shared_state.get_peers(conf.heartbeat_timeout)
764 0 : }
765 :
766 0 : pub fn get_walsenders(&self) -> &Arc<WalSenders> {
767 0 : &self.walsenders
768 0 : }
769 :
770 0 : pub fn get_walreceivers(&self) -> &Arc<WalReceivers> {
771 0 : &self.walreceivers
772 0 : }
773 :
774 : /// Returns flush_lsn.
775 0 : pub async fn get_flush_lsn(&self) -> Lsn {
776 0 : self.read_shared_state().await.sk.flush_lsn()
777 0 : }
778 :
779 : /// Gather timeline data for metrics.
780 0 : pub async fn info_for_metrics(&self) -> Option<FullTimelineInfo> {
781 0 : if self.is_cancelled() {
782 0 : return None;
783 0 : }
784 0 :
785 0 : let (ps_feedback_count, last_ps_feedback) = self.walsenders.get_ps_feedback_stats();
786 0 : let state = self.read_shared_state().await;
787 0 : Some(FullTimelineInfo {
788 0 : ttid: self.ttid,
789 0 : ps_feedback_count,
790 0 : last_ps_feedback,
791 0 : wal_backup_active: self.wal_backup_active.load(Ordering::Relaxed),
792 0 : timeline_is_active: self.broker_active.load(Ordering::Relaxed),
793 0 : num_computes: self.walreceivers.get_num() as u32,
794 0 : last_removed_segno: self.last_removed_segno.load(Ordering::Relaxed),
795 0 : epoch_start_lsn: state.sk.term_start_lsn(),
796 0 : mem_state: state.sk.state().inmem.clone(),
797 0 : persisted_state: TimelinePersistentState::clone(state.sk.state()),
798 0 : flush_lsn: state.sk.flush_lsn(),
799 0 : wal_storage: state.sk.wal_storage_metrics(),
800 0 : })
801 0 : }
802 :
803 : /// Returns in-memory timeline state to build a full debug dump.
804 0 : pub async fn memory_dump(&self) -> debug_dump::Memory {
805 0 : let state = self.read_shared_state().await;
806 :
807 0 : let (write_lsn, write_record_lsn, flush_lsn, file_open) =
808 0 : state.sk.wal_storage_internal_state();
809 0 :
810 0 : debug_dump::Memory {
811 0 : is_cancelled: self.is_cancelled(),
812 0 : peers_info_len: state.peers_info.0.len(),
813 0 : walsenders: self.walsenders.get_all(),
814 0 : wal_backup_active: self.wal_backup_active.load(Ordering::Relaxed),
815 0 : active: self.broker_active.load(Ordering::Relaxed),
816 0 : num_computes: self.walreceivers.get_num() as u32,
817 0 : last_removed_segno: self.last_removed_segno.load(Ordering::Relaxed),
818 0 : epoch_start_lsn: state.sk.term_start_lsn(),
819 0 : mem_state: state.sk.state().inmem.clone(),
820 0 : mgr_status: self.mgr_status.get(),
821 0 : write_lsn,
822 0 : write_record_lsn,
823 0 : flush_lsn,
824 0 : file_open,
825 0 : }
826 0 : }
827 :
828 : /// Apply a function to the control file state and persist it.
829 0 : pub async fn map_control_file<T>(
830 0 : self: &Arc<Self>,
831 0 : f: impl FnOnce(&mut TimelinePersistentState) -> Result<T>,
832 0 : ) -> Result<T> {
833 0 : let mut state = self.write_shared_state().await;
834 0 : let mut persistent_state = state.sk.state_mut().start_change();
835 : // If f returns error, we abort the change and don't persist anything.
836 0 : let res = f(&mut persistent_state)?;
837 : // If persisting fails, we abort the change and return error.
838 0 : state
839 0 : .sk
840 0 : .state_mut()
841 0 : .finish_change(&persistent_state)
842 0 : .await?;
843 0 : Ok(res)
844 0 : }
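// Hedged usage sketch: mutate and persist the control-file state atomically.
// The closure runs under the shared-state write lock; if it or the flush fails,
// nothing is persisted. The closure body below is a placeholder.
//
//     tli.map_control_file(|state| {
//         // update persistent fields of `state` here
//         Ok(())
//     })
//     .await?;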
845 :
846 : /// Get the timeline guard for reading/writing WAL files.
847 : /// If WAL files are not present on disk (evicted), they will be automatically
848 : /// downloaded from remote storage. This is done in the manager task, which is
849 : /// responsible for issuing all guards.
850 : ///
851 : /// NB: don't use this function from timeline_manager, it will deadlock.
852 : /// NB: don't use this function while holding shared_state lock.
853 0 : pub async fn wal_residence_guard(self: &Arc<Self>) -> Result<WalResidentTimeline> {
854 0 : if self.is_cancelled() {
855 0 : bail!(TimelineError::Cancelled(self.ttid));
856 0 : }
857 0 :
858 0 : debug!("requesting WalResidentTimeline guard");
859 :
860 : // Wait 5 seconds for the guard to be acquired, should be enough for uneviction.
861 : // If it times out, most likely there is a deadlock in the manager task.
862 0 : let res = tokio::time::timeout(
863 0 : Duration::from_secs(5),
864 0 : self.manager_ctl.wal_residence_guard(),
865 0 : )
866 0 : .await;
867 :
868 0 : let guard = match res {
869 0 : Ok(Ok(guard)) => guard,
870 0 : Ok(Err(e)) => {
871 0 : warn!(
872 0 : "error while acquiring WalResidentTimeline guard (current state {:?}): {}",
873 0 : self.mgr_status.get(),
874 : e
875 : );
876 0 : return Err(e);
877 : }
878 : Err(_) => {
879 0 : warn!(
880 0 : "timeout while acquiring WalResidentTimeline guard (current state {:?})",
881 0 : self.mgr_status.get()
882 : );
883 0 : anyhow::bail!("timeout while acquiring WalResidentTimeline guard");
884 : }
885 : };
886 :
887 0 : Ok(WalResidentTimeline::new(self.clone(), guard))
888 0 : }
889 : }
890 :
891 : /// This is a guard that allows reading/writing the on-disk timeline state.
892 : /// All tasks that are trying to read/write WAL from disk should use this guard.
893 : pub struct WalResidentTimeline {
894 : pub tli: Arc<Timeline>,
895 : _guard: ResidenceGuard,
896 : }
897 :
898 : impl WalResidentTimeline {
899 0 : pub fn new(tli: Arc<Timeline>, _guard: ResidenceGuard) -> Self {
900 0 : WalResidentTimeline { tli, _guard }
901 0 : }
902 : }
903 :
904 : impl Deref for WalResidentTimeline {
905 : type Target = Arc<Timeline>;
906 :
907 0 : fn deref(&self) -> &Self::Target {
908 0 : &self.tli
909 0 : }
910 : }
911 :
912 : impl WalResidentTimeline {
913 : /// Returns true if the walsender should stop sending WAL to the pageserver. We
914 : /// terminate it if remote_consistent_lsn has reached commit_lsn and there are no
915 : /// computes. While there might be nothing to stream already, we learn about
916 : /// remote_consistent_lsn updates through replication feedback, and we want
917 : /// to stop pushing to the broker once the pageserver is fully caught up.
918 0 : pub async fn should_walsender_stop(&self, reported_remote_consistent_lsn: Lsn) -> bool {
919 0 : if self.is_cancelled() {
920 0 : return true;
921 0 : }
922 0 : let shared_state = self.read_shared_state().await;
923 0 : if self.walreceivers.get_num() == 0 {
924 0 : return shared_state.sk.state().inmem.commit_lsn == Lsn(0) || // no data at all yet
925 0 : reported_remote_consistent_lsn >= shared_state.sk.state().inmem.commit_lsn;
926 0 : }
927 0 : false
928 0 : }
929 :
930 : /// Ensure that the current term is `t`, returning an error otherwise, and lock the state.
931 0 : pub async fn acquire_term(&self, t: Term) -> Result<ReadGuardSharedState> {
932 0 : let ss = self.read_shared_state().await;
933 0 : if ss.sk.state().acceptor_state.term != t {
934 0 : bail!(
935 0 : "failed to acquire term {}, current term {}",
936 0 : t,
937 0 : ss.sk.state().acceptor_state.term
938 0 : );
939 0 : }
940 0 : Ok(ss)
941 0 : }
942 :
943 : /// Pass arrived message to the safekeeper.
944 0 : pub async fn process_msg(
945 0 : &self,
946 0 : msg: &ProposerAcceptorMessage,
947 0 : ) -> Result<Option<AcceptorProposerMessage>> {
948 0 : if self.is_cancelled() {
949 0 : bail!(TimelineError::Cancelled(self.ttid));
950 0 : }
951 :
952 : let mut rmsg: Option<AcceptorProposerMessage>;
953 : {
954 0 : let mut shared_state = self.write_shared_state().await;
955 0 : rmsg = shared_state.sk.safekeeper().process_msg(msg).await?;
956 :
957 : // if this is AppendResponse, fill in proper hot standby feedback.
958 0 : if let Some(AcceptorProposerMessage::AppendResponse(ref mut resp)) = rmsg {
959 0 : resp.hs_feedback = self.walsenders.get_hotstandby().hs_feedback;
960 0 : }
961 : }
962 0 : Ok(rmsg)
963 0 : }
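// Hedged sketch of the caller side: the WAL receiver (receive_wal.rs) decodes
// ProposerAcceptorMessage values from the walproposer connection, feeds them
// through process_msg and forwards any reply. `resident` is assumed to be a
// WalResidentTimeline and `msg` a decoded message.
//
//     if let Some(reply) = resident.process_msg(&msg).await? {
//         // serialize `reply` and send it back to the walproposer
//     }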
964 :
965 0 : pub async fn get_walreader(&self, start_lsn: Lsn) -> Result<WalReader> {
966 0 : let (_, persisted_state) = self.get_state().await;
967 0 : let enable_remote_read = GlobalTimelines::get_global_config().is_wal_backup_enabled();
968 0 :
969 0 : WalReader::new(
970 0 : &self.ttid,
971 0 : self.timeline_dir.clone(),
972 0 : &persisted_state,
973 0 : start_lsn,
974 0 : enable_remote_read,
975 0 : )
976 0 : }
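// Hedged sketch of the WAL-reading flow: a sender first obtains the residence
// guard via Timeline::wal_residence_guard() (which un-evicts the timeline if
// needed) and only then opens a WalReader. `start_lsn` is assumed.
//
//     let resident = tli.wal_residence_guard().await?;
//     let mut reader = resident.get_walreader(start_lsn).await?;
//     // read WAL through `reader` while the guard is held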
977 :
978 0 : pub fn get_timeline_dir(&self) -> Utf8PathBuf {
979 0 : self.timeline_dir.clone()
980 0 : }
981 :
982 : /// Update in memory remote consistent lsn.
983 0 : pub async fn update_remote_consistent_lsn(&self, candidate: Lsn) {
984 0 : let mut shared_state = self.write_shared_state().await;
985 0 : shared_state.sk.state_mut().inmem.remote_consistent_lsn = max(
986 0 : shared_state.sk.state().inmem.remote_consistent_lsn,
987 0 : candidate,
988 0 : );
989 0 : }
990 : }
991 :
992 : /// This struct contains methods that are used by timeline manager task.
993 : pub(crate) struct ManagerTimeline {
994 : pub(crate) tli: Arc<Timeline>,
995 : }
996 :
997 : impl Deref for ManagerTimeline {
998 : type Target = Arc<Timeline>;
999 :
1000 0 : fn deref(&self) -> &Self::Target {
1001 0 : &self.tli
1002 0 : }
1003 : }
1004 :
1005 : impl ManagerTimeline {
1006 0 : pub(crate) fn timeline_dir(&self) -> &Utf8PathBuf {
1007 0 : &self.tli.timeline_dir
1008 0 : }
1009 :
1010 : /// Manager requests this state on startup.
1011 0 : pub(crate) async fn bootstrap_mgr(&self) -> (bool, Option<PartialRemoteSegment>) {
1012 0 : let shared_state = self.read_shared_state().await;
1013 0 : let is_offloaded = matches!(
1014 0 : shared_state.sk.state().eviction_state,
1015 : EvictionState::Offloaded(_)
1016 : );
1017 0 : let partial_backup_uploaded = shared_state.sk.state().partial_backup.uploaded_segment();
1018 0 :
1019 0 : (is_offloaded, partial_backup_uploaded)
1020 0 : }
1021 :
1022 : /// Try to switch state Present->Offloaded.
1023 0 : pub(crate) async fn switch_to_offloaded(
1024 0 : &self,
1025 0 : partial: &PartialRemoteSegment,
1026 0 : ) -> anyhow::Result<()> {
1027 0 : let mut shared = self.write_shared_state().await;
1028 :
1029 : // updating control file
1030 0 : let mut pstate = shared.sk.state_mut().start_change();
1031 :
1032 0 : if !matches!(pstate.eviction_state, EvictionState::Present) {
1033 0 : bail!(
1034 0 : "cannot switch to offloaded state, current state is {:?}",
1035 0 : pstate.eviction_state
1036 0 : );
1037 0 : }
1038 0 :
1039 0 : if partial.flush_lsn != shared.sk.flush_lsn() {
1040 0 : bail!(
1041 0 : "flush_lsn mismatch in partial backup, expected {}, got {}",
1042 0 : shared.sk.flush_lsn(),
1043 0 : partial.flush_lsn
1044 0 : );
1045 0 : }
1046 0 :
1047 0 : if partial.commit_lsn != pstate.commit_lsn {
1048 0 : bail!(
1049 0 : "commit_lsn mismatch in partial backup, expected {}, got {}",
1050 0 : pstate.commit_lsn,
1051 0 : partial.commit_lsn
1052 0 : );
1053 0 : }
1054 0 :
1055 0 : if partial.term != shared.sk.last_log_term() {
1056 0 : bail!(
1057 0 : "term mismatch in partial backup, expected {}, got {}",
1058 0 : shared.sk.last_log_term(),
1059 0 : partial.term
1060 0 : );
1061 0 : }
1062 0 :
1063 0 : pstate.eviction_state = EvictionState::Offloaded(shared.sk.flush_lsn());
1064 0 : shared.sk.state_mut().finish_change(&pstate).await?;
1065 : // control file is now switched to Offloaded state
1066 :
1067 : // now we can switch shared.sk to Offloaded, shouldn't fail
1068 0 : let prev_sk = std::mem::replace(&mut shared.sk, StateSK::Empty);
1069 0 : let cfile_state = prev_sk.take_state();
1070 0 : shared.sk = StateSK::Offloaded(Box::new(cfile_state));
1071 0 :
1072 0 : Ok(())
1073 0 : }
1074 :
1075 : /// Try to switch state Offloaded->Present.
1076 0 : pub(crate) async fn switch_to_present(&self) -> anyhow::Result<()> {
1077 0 : let conf = GlobalTimelines::get_global_config();
1078 0 : let mut shared = self.write_shared_state().await;
1079 :
1080 : // trying to restore WAL storage
1081 0 : let wal_store = wal_storage::PhysicalStorage::new(
1082 0 : &self.ttid,
1083 0 : self.timeline_dir.clone(),
1084 0 : &conf,
1085 0 : shared.sk.state(),
1086 0 : )?;
1087 :
1088 : // updating control file
1089 0 : let mut pstate = shared.sk.state_mut().start_change();
1090 :
1091 0 : if !matches!(pstate.eviction_state, EvictionState::Offloaded(_)) {
1092 0 : bail!(
1093 0 : "cannot switch to present state, current state is {:?}",
1094 0 : pstate.eviction_state
1095 0 : );
1096 0 : }
1097 0 :
1098 0 : if wal_store.flush_lsn() != shared.sk.flush_lsn() {
1099 0 : bail!(
1100 0 : "flush_lsn mismatch in restored WAL, expected {}, got {}",
1101 0 : shared.sk.flush_lsn(),
1102 0 : wal_store.flush_lsn()
1103 0 : );
1104 0 : }
1105 0 :
1106 0 : pstate.eviction_state = EvictionState::Present;
1107 0 : shared.sk.state_mut().finish_change(&pstate).await?;
1108 :
1109 : // now we can switch shared.sk to Present, shouldn't fail
1110 0 : let prev_sk = std::mem::replace(&mut shared.sk, StateSK::Empty);
1111 0 : let cfile_state = prev_sk.take_state();
1112 0 : shared.sk = StateSK::Loaded(SafeKeeper::new(cfile_state, wal_store, conf.my_id)?);
1113 :
1114 0 : Ok(())
1115 0 : }
1116 :
1117 : /// Update current manager state, useful for debugging manager deadlocks.
1118 0 : pub(crate) fn set_status(&self, status: timeline_manager::Status) {
1119 0 : self.mgr_status.store(status, Ordering::Relaxed);
1120 0 : }
1121 : }
1122 :
1123 : /// Deletes the directory and its contents. Returns false if the directory does not exist.
1124 0 : async fn delete_dir(path: &Utf8PathBuf) -> Result<bool> {
1125 0 : match fs::remove_dir_all(path).await {
1126 0 : Ok(_) => Ok(true),
1127 0 : Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(false),
1128 0 : Err(e) => Err(e.into()),
1129 : }
1130 0 : }
1131 :
1132 : /// Get a path to the tenant directory. If you just need to get a timeline directory,
1133 : /// use WalResidentTimeline::get_timeline_dir instead.
1134 14 : pub(crate) fn get_tenant_dir(conf: &SafeKeeperConf, tenant_id: &TenantId) -> Utf8PathBuf {
1135 14 : conf.workdir.join(tenant_id.to_string())
1136 14 : }
1137 :
1138 : /// Get a path to the timeline directory. If you need to read WAL files from disk,
1139 : /// use WalResidentTimeline::get_timeline_dir instead. This function does not check
1140 : /// timeline eviction status and WAL files might not be present on disk.
1141 14 : pub(crate) fn get_timeline_dir(conf: &SafeKeeperConf, ttid: &TenantTimelineId) -> Utf8PathBuf {
1142 14 : get_tenant_dir(conf, &ttid.tenant_id).join(ttid.timeline_id.to_string())
1143 14 : }