LCOV - code coverage report
Current view: top level - safekeeper/src - timeline.rs (source / functions)
Test:      42f947419473a288706e86ecdf7c2863d760d5d7.info
Test Date: 2024-08-02 21:34:27
Coverage:  Lines: 0.8 % (6 of 727 hit)    Functions: 1.8 % (2 of 109 hit)

            Line data    Source code
       1              : //! This module implements Timeline lifecycle management and has all necessary code
       2              : //! to glue together SafeKeeper and all other background services.
       3              : 
       4              : use anyhow::{anyhow, bail, Result};
       5              : use camino::Utf8PathBuf;
       6              : use serde::{Deserialize, Serialize};
       7              : use tokio::fs::{self};
       8              : use tokio_util::sync::CancellationToken;
       9              : use utils::id::TenantId;
      10              : 
      11              : use std::cmp::max;
      12              : use std::ops::{Deref, DerefMut};
      13              : use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
      14              : use std::sync::Arc;
      15              : use std::time::Duration;
      16              : use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};
      17              : use tokio::{sync::watch, time::Instant};
      18              : use tracing::*;
      19              : use utils::http::error::ApiError;
      20              : use utils::{
      21              :     id::{NodeId, TenantTimelineId},
      22              :     lsn::Lsn,
      23              : };
      24              : 
      25              : use storage_broker::proto::SafekeeperTimelineInfo;
      26              : use storage_broker::proto::TenantTimelineId as ProtoTenantTimelineId;
      27              : 
      28              : use crate::rate_limit::RateLimiter;
      29              : use crate::receive_wal::WalReceivers;
      30              : use crate::safekeeper::{
      31              :     AcceptorProposerMessage, ProposerAcceptorMessage, SafeKeeper, ServerInfo, Term, TermLsn,
      32              :     INVALID_TERM,
      33              : };
      34              : use crate::send_wal::WalSenders;
      35              : use crate::state::{EvictionState, TimelineMemState, TimelinePersistentState, TimelineState};
      36              : use crate::timeline_guard::ResidenceGuard;
      37              : use crate::timeline_manager::{AtomicStatus, ManagerCtl};
      38              : use crate::timelines_set::TimelinesSet;
      39              : use crate::wal_backup::{self};
      40              : use crate::wal_backup_partial::PartialRemoteSegment;
      41              : use crate::{control_file, safekeeper::UNKNOWN_SERVER_VERSION};
      42              : 
      43              : use crate::metrics::{FullTimelineInfo, WalStorageMetrics, MISC_OPERATION_SECONDS};
      44              : use crate::wal_storage::{Storage as wal_storage_iface, WalReader};
      45              : use crate::{debug_dump, timeline_manager, wal_storage};
      46              : use crate::{GlobalTimelines, SafeKeeperConf};
      47              : 
      48              : /// Things safekeeper should know about timeline state on peers.
      49            0 : #[derive(Debug, Clone, Serialize, Deserialize)]
      50              : pub struct PeerInfo {
      51              :     pub sk_id: NodeId,
      52              :     pub term: Term,
      53              :     /// Term of the last entry.
      54              :     pub last_log_term: Term,
      55              :     /// LSN of the last record.
      56              :     pub flush_lsn: Lsn,
      57              :     pub commit_lsn: Lsn,
       58              :     /// LSN since which the safekeeper has WAL.
      59              :     pub local_start_lsn: Lsn,
      60              :     /// When info was received. Serde annotations are not very useful but make
      61              :     /// the code compile -- we don't rely on this field externally.
      62              :     #[serde(skip)]
      63              :     #[serde(default = "Instant::now")]
      64              :     ts: Instant,
      65              :     pub pg_connstr: String,
      66              :     pub http_connstr: String,
      67              : }
      68              : 
      69              : impl PeerInfo {
      70            0 :     fn from_sk_info(sk_info: &SafekeeperTimelineInfo, ts: Instant) -> PeerInfo {
      71            0 :         PeerInfo {
      72            0 :             sk_id: NodeId(sk_info.safekeeper_id),
      73            0 :             term: sk_info.term,
      74            0 :             last_log_term: sk_info.last_log_term,
      75            0 :             flush_lsn: Lsn(sk_info.flush_lsn),
      76            0 :             commit_lsn: Lsn(sk_info.commit_lsn),
      77            0 :             local_start_lsn: Lsn(sk_info.local_start_lsn),
      78            0 :             pg_connstr: sk_info.safekeeper_connstr.clone(),
      79            0 :             http_connstr: sk_info.http_connstr.clone(),
      80            0 :             ts,
      81            0 :         }
      82            0 :     }
      83              : }
      84              : 
       85              : // Vector-based node id -> peer state map, with only the very limited
       86              : // functionality we need.
      87              : #[derive(Debug, Clone, Default)]
      88              : pub struct PeersInfo(pub Vec<PeerInfo>);
      89              : 
      90              : impl PeersInfo {
      91            0 :     fn get(&mut self, id: NodeId) -> Option<&mut PeerInfo> {
      92            0 :         self.0.iter_mut().find(|p| p.sk_id == id)
      93            0 :     }
      94              : 
      95            0 :     fn upsert(&mut self, p: &PeerInfo) {
      96            0 :         match self.get(p.sk_id) {
      97            0 :             Some(rp) => *rp = p.clone(),
      98            0 :             None => self.0.push(p.clone()),
      99              :         }
     100            0 :     }
     101              : }
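// Illustrative aside (hypothetical types, not part of this module): the same
// "upsert into a small vector" idea in a self-contained form. A linear scan is
// enough because the peer set holds at most one entry per safekeeper node.
fn upsert_by_id(v: &mut Vec<(u64, String)>, id: u64, val: String) {
    match v.iter().position(|(k, _)| *k == id) {
        Some(i) => v[i].1 = val,   // node already known: overwrite its state
        None => v.push((id, val)), // first message from this node: append it
    }
}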
     102              : 
     103              : pub type ReadGuardSharedState<'a> = RwLockReadGuard<'a, SharedState>;
     104              : 
     105              : /// WriteGuardSharedState is a wrapper around `RwLockWriteGuard<SharedState>` that
     106              : /// automatically updates `watch::Sender` channels with state on drop.
     107              : pub struct WriteGuardSharedState<'a> {
     108              :     tli: Arc<Timeline>,
     109              :     guard: RwLockWriteGuard<'a, SharedState>,
     110              :     skip_update: bool,
     111              : }
     112              : 
     113              : impl<'a> WriteGuardSharedState<'a> {
     114            0 :     fn new(tli: Arc<Timeline>, guard: RwLockWriteGuard<'a, SharedState>) -> Self {
     115            0 :         WriteGuardSharedState {
     116            0 :             tli,
     117            0 :             guard,
     118            0 :             skip_update: false,
     119            0 :         }
     120            0 :     }
     121              : }
     122              : 
     123              : impl<'a> Deref for WriteGuardSharedState<'a> {
     124              :     type Target = SharedState;
     125              : 
     126            0 :     fn deref(&self) -> &Self::Target {
     127            0 :         &self.guard
     128            0 :     }
     129              : }
     130              : 
     131              : impl<'a> DerefMut for WriteGuardSharedState<'a> {
     132            0 :     fn deref_mut(&mut self) -> &mut Self::Target {
     133            0 :         &mut self.guard
     134            0 :     }
     135              : }
     136              : 
     137              : impl<'a> Drop for WriteGuardSharedState<'a> {
     138            0 :     fn drop(&mut self) {
     139            0 :         let term_flush_lsn =
     140            0 :             TermLsn::from((self.guard.sk.last_log_term(), self.guard.sk.flush_lsn()));
     141            0 :         let commit_lsn = self.guard.sk.state().inmem.commit_lsn;
     142            0 : 
     143            0 :         let _ = self.tli.term_flush_lsn_watch_tx.send_if_modified(|old| {
     144            0 :             if *old != term_flush_lsn {
     145            0 :                 *old = term_flush_lsn;
     146            0 :                 true
     147              :             } else {
     148            0 :                 false
     149              :             }
     150            0 :         });
     151            0 : 
     152            0 :         let _ = self.tli.commit_lsn_watch_tx.send_if_modified(|old| {
     153            0 :             if *old != commit_lsn {
     154            0 :                 *old = commit_lsn;
     155            0 :                 true
     156              :             } else {
     157            0 :                 false
     158              :             }
     159            0 :         });
     160            0 : 
     161            0 :         if !self.skip_update {
     162            0 :             // send notification about shared state update
     163            0 :             self.tli.shared_state_version_tx.send_modify(|old| {
     164            0 :                 *old += 1;
     165            0 :             });
     166            0 :         }
     167            0 :     }
     168              : }
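// Illustrative aside (hypothetical names, not part of this module): the
// notify-on-drop pattern used by WriteGuardSharedState, in a self-contained
// form. The guard mutates the value freely and, when released, publishes it
// through a tokio watch channel; send_if_modified wakes subscribers only if
// the value actually changed.
use std::ops::{Deref, DerefMut};
use tokio::sync::watch;

struct Shared {
    value: u64,
    value_tx: watch::Sender<u64>,
}

struct WriteGuard<'a> {
    shared: &'a mut Shared,
}

impl<'a> Deref for WriteGuard<'a> {
    type Target = u64;
    fn deref(&self) -> &u64 {
        &self.shared.value
    }
}

impl<'a> DerefMut for WriteGuard<'a> {
    fn deref_mut(&mut self) -> &mut u64 {
        &mut self.shared.value
    }
}

impl<'a> Drop for WriteGuard<'a> {
    fn drop(&mut self) {
        let value = self.shared.value;
        // Publish only real changes, mirroring the send_if_modified calls above.
        self.shared.value_tx.send_if_modified(|old| {
            if *old != value {
                *old = value;
                true
            } else {
                false
            }
        });
    }
}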
     169              : 
     170              : /// This structure is stored in shared state and represents the state of the timeline.
      171              : /// Usually it holds a SafeKeeper, but it also supports the offloaded timeline state. In
      172              : /// that case, the SafeKeeper is not available (because WAL is not present on disk) and
      173              : /// all operations can be done only with the control file.
     174              : pub enum StateSK {
     175              :     Loaded(SafeKeeper<control_file::FileStorage, wal_storage::PhysicalStorage>),
     176              :     Offloaded(Box<TimelineState<control_file::FileStorage>>),
     177              :     // Not used, required for moving between states.
     178              :     Empty,
     179              : }
     180              : 
     181              : impl StateSK {
     182            0 :     pub fn flush_lsn(&self) -> Lsn {
     183            0 :         match self {
     184            0 :             StateSK::Loaded(sk) => sk.wal_store.flush_lsn(),
     185            0 :             StateSK::Offloaded(state) => match state.eviction_state {
     186            0 :                 EvictionState::Offloaded(flush_lsn) => flush_lsn,
     187            0 :                 _ => panic!("StateSK::Offloaded mismatches with eviction_state from control_file"),
     188              :             },
     189            0 :             StateSK::Empty => unreachable!(),
     190              :         }
     191            0 :     }
     192              : 
     193              :     /// Get a reference to the control file's timeline state.
     194            0 :     pub fn state(&self) -> &TimelineState<control_file::FileStorage> {
     195            0 :         match self {
     196            0 :             StateSK::Loaded(sk) => &sk.state,
     197            0 :             StateSK::Offloaded(ref s) => s,
     198            0 :             StateSK::Empty => unreachable!(),
     199              :         }
     200            0 :     }
     201              : 
     202            0 :     pub fn state_mut(&mut self) -> &mut TimelineState<control_file::FileStorage> {
     203            0 :         match self {
     204            0 :             StateSK::Loaded(sk) => &mut sk.state,
     205            0 :             StateSK::Offloaded(ref mut s) => s,
     206            0 :             StateSK::Empty => unreachable!(),
     207              :         }
     208            0 :     }
     209              : 
     210            0 :     pub fn last_log_term(&self) -> Term {
     211            0 :         self.state()
     212            0 :             .acceptor_state
     213            0 :             .get_last_log_term(self.flush_lsn())
     214            0 :     }
     215              : 
     216              :     /// Close open WAL files to release FDs.
     217            0 :     fn close_wal_store(&mut self) {
     218            0 :         if let StateSK::Loaded(sk) = self {
     219            0 :             sk.wal_store.close();
     220            0 :         }
     221            0 :     }
     222              : 
     223              :     /// Update timeline state with peer safekeeper data.
     224            0 :     pub async fn record_safekeeper_info(&mut self, sk_info: &SafekeeperTimelineInfo) -> Result<()> {
     225            0 :         // update commit_lsn if safekeeper is loaded
     226            0 :         match self {
     227            0 :             StateSK::Loaded(sk) => sk.record_safekeeper_info(sk_info).await?,
     228            0 :             StateSK::Offloaded(_) => {}
     229            0 :             StateSK::Empty => unreachable!(),
     230              :         }
     231              : 
     232              :         // update everything else, including remote_consistent_lsn and backup_lsn
     233            0 :         let mut sync_control_file = false;
     234            0 :         let state = self.state_mut();
     235            0 :         let wal_seg_size = state.server.wal_seg_size as u64;
     236            0 : 
     237            0 :         state.inmem.backup_lsn = max(Lsn(sk_info.backup_lsn), state.inmem.backup_lsn);
     238            0 :         sync_control_file |= state.backup_lsn + wal_seg_size < state.inmem.backup_lsn;
     239            0 : 
     240            0 :         state.inmem.remote_consistent_lsn = max(
     241            0 :             Lsn(sk_info.remote_consistent_lsn),
     242            0 :             state.inmem.remote_consistent_lsn,
     243            0 :         );
     244            0 :         sync_control_file |=
     245            0 :             state.remote_consistent_lsn + wal_seg_size < state.inmem.remote_consistent_lsn;
     246            0 : 
     247            0 :         state.inmem.peer_horizon_lsn =
     248            0 :             max(Lsn(sk_info.peer_horizon_lsn), state.inmem.peer_horizon_lsn);
     249            0 :         sync_control_file |= state.peer_horizon_lsn + wal_seg_size < state.inmem.peer_horizon_lsn;
     250            0 : 
     251            0 :         if sync_control_file {
     252            0 :             state.flush().await?;
     253            0 :         }
     254            0 :         Ok(())
     255            0 :     }
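// Illustrative aside (hypothetical helper, not part of this module): the flush
// heuristic used above, in one place. The control file is rewritten only when
// a persisted LSN lags its in-memory counterpart by more than one WAL segment,
// so frequent peer updates do not turn into a disk write per message.
fn lags_by_more_than_one_segment(persisted: u64, in_memory: u64, wal_seg_size: u64) -> bool {
    persisted + wal_seg_size < in_memory
}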
     256              : 
     257              :     /// Previously known as epoch_start_lsn. Needed only for reference in some APIs.
     258            0 :     pub fn term_start_lsn(&self) -> Lsn {
     259            0 :         match self {
     260            0 :             StateSK::Loaded(sk) => sk.term_start_lsn,
     261            0 :             StateSK::Offloaded(_) => Lsn(0),
     262            0 :             StateSK::Empty => unreachable!(),
     263              :         }
     264            0 :     }
     265              : 
     266              :     /// Used for metrics only.
     267            0 :     pub fn wal_storage_metrics(&self) -> WalStorageMetrics {
     268            0 :         match self {
     269            0 :             StateSK::Loaded(sk) => sk.wal_store.get_metrics(),
     270            0 :             StateSK::Offloaded(_) => WalStorageMetrics::default(),
     271            0 :             StateSK::Empty => unreachable!(),
     272              :         }
     273            0 :     }
     274              : 
     275              :     /// Returns WAL storage internal LSNs for debug dump.
     276            0 :     pub fn wal_storage_internal_state(&self) -> (Lsn, Lsn, Lsn, bool) {
     277            0 :         match self {
     278            0 :             StateSK::Loaded(sk) => sk.wal_store.internal_state(),
     279              :             StateSK::Offloaded(_) => {
     280            0 :                 let flush_lsn = self.flush_lsn();
     281            0 :                 (flush_lsn, flush_lsn, flush_lsn, false)
     282              :             }
     283            0 :             StateSK::Empty => unreachable!(),
     284              :         }
     285            0 :     }
     286              : 
      287              :     /// Access to the SafeKeeper object. Panics if offloaded; should be fine to use from WalResidentTimeline.
     288            0 :     pub fn safekeeper(
     289            0 :         &mut self,
     290            0 :     ) -> &mut SafeKeeper<control_file::FileStorage, wal_storage::PhysicalStorage> {
     291            0 :         match self {
     292            0 :             StateSK::Loaded(sk) => sk,
     293              :             StateSK::Offloaded(_) => {
     294            0 :                 panic!("safekeeper is offloaded, cannot be used")
     295              :             }
     296            0 :             StateSK::Empty => unreachable!(),
     297              :         }
     298            0 :     }
     299              : 
     300              :     /// Moves control file's state structure out of the enum. Used to switch states.
     301            0 :     fn take_state(self) -> TimelineState<control_file::FileStorage> {
     302            0 :         match self {
     303            0 :             StateSK::Loaded(sk) => sk.state,
     304            0 :             StateSK::Offloaded(state) => *state,
     305            0 :             StateSK::Empty => unreachable!(),
     306              :         }
     307            0 :     }
     308              : }
     309              : 
     310              : /// Shared state associated with database instance
     311              : pub struct SharedState {
     312              :     /// Safekeeper object
     313              :     pub(crate) sk: StateSK,
      314              :     /// In-memory list containing the state of peers, as sent in their latest messages.
     315              :     pub(crate) peers_info: PeersInfo,
      316              :     // A true value prevents old WAL removal; this is used by snapshotting. We
     317              :     // could make it a counter, but there is no need to.
     318              :     pub(crate) wal_removal_on_hold: bool,
     319              : }
     320              : 
     321              : impl SharedState {
     322              :     /// Initialize fresh timeline state without persisting anything to disk.
     323            0 :     fn create_new(
     324            0 :         conf: &SafeKeeperConf,
     325            0 :         ttid: &TenantTimelineId,
     326            0 :         state: TimelinePersistentState,
     327            0 :     ) -> Result<Self> {
     328            0 :         if state.server.wal_seg_size == 0 {
     329            0 :             bail!(TimelineError::UninitializedWalSegSize(*ttid));
     330            0 :         }
     331            0 : 
     332            0 :         if state.server.pg_version == UNKNOWN_SERVER_VERSION {
     333            0 :             bail!(TimelineError::UninitialinzedPgVersion(*ttid));
     334            0 :         }
     335            0 : 
     336            0 :         if state.commit_lsn < state.local_start_lsn {
     337            0 :             bail!(
     338            0 :                 "commit_lsn {} is higher than local_start_lsn {}",
     339            0 :                 state.commit_lsn,
     340            0 :                 state.local_start_lsn
     341            0 :             );
     342            0 :         }
     343            0 : 
     344            0 :         // We don't want to write anything to disk, because we may have existing timeline there.
     345            0 :         // These functions should not change anything on disk.
     346            0 :         let timeline_dir = get_timeline_dir(conf, ttid);
     347            0 :         let control_store =
     348            0 :             control_file::FileStorage::create_new(timeline_dir.clone(), conf, state)?;
     349            0 :         let wal_store =
     350            0 :             wal_storage::PhysicalStorage::new(ttid, timeline_dir, conf, &control_store)?;
     351            0 :         let sk = SafeKeeper::new(TimelineState::new(control_store), wal_store, conf.my_id)?;
     352              : 
     353            0 :         Ok(Self {
     354            0 :             sk: StateSK::Loaded(sk),
     355            0 :             peers_info: PeersInfo(vec![]),
     356            0 :             wal_removal_on_hold: false,
     357            0 :         })
     358            0 :     }
     359              : 
     360              :     /// Restore SharedState from control file. If file doesn't exist, bails out.
     361            0 :     fn restore(conf: &SafeKeeperConf, ttid: &TenantTimelineId) -> Result<Self> {
     362            0 :         let timeline_dir = get_timeline_dir(conf, ttid);
     363            0 :         let control_store = control_file::FileStorage::restore_new(ttid, conf)?;
     364            0 :         if control_store.server.wal_seg_size == 0 {
     365            0 :             bail!(TimelineError::UninitializedWalSegSize(*ttid));
     366            0 :         }
     367              : 
     368            0 :         let sk = match control_store.eviction_state {
     369              :             EvictionState::Present => {
     370            0 :                 let wal_store =
     371            0 :                     wal_storage::PhysicalStorage::new(ttid, timeline_dir, conf, &control_store)?;
     372            0 :                 StateSK::Loaded(SafeKeeper::new(
     373            0 :                     TimelineState::new(control_store),
     374            0 :                     wal_store,
     375            0 :                     conf.my_id,
     376            0 :                 )?)
     377              :             }
     378              :             EvictionState::Offloaded(_) => {
     379            0 :                 StateSK::Offloaded(Box::new(TimelineState::new(control_store)))
     380              :             }
     381              :         };
     382              : 
     383            0 :         Ok(Self {
     384            0 :             sk,
     385            0 :             peers_info: PeersInfo(vec![]),
     386            0 :             wal_removal_on_hold: false,
     387            0 :         })
     388            0 :     }
     389              : 
     390            0 :     pub(crate) fn get_wal_seg_size(&self) -> usize {
     391            0 :         self.sk.state().server.wal_seg_size as usize
     392            0 :     }
     393              : 
     394            0 :     fn get_safekeeper_info(
     395            0 :         &self,
     396            0 :         ttid: &TenantTimelineId,
     397            0 :         conf: &SafeKeeperConf,
     398            0 :         standby_apply_lsn: Lsn,
     399            0 :     ) -> SafekeeperTimelineInfo {
     400            0 :         SafekeeperTimelineInfo {
     401            0 :             safekeeper_id: conf.my_id.0,
     402            0 :             tenant_timeline_id: Some(ProtoTenantTimelineId {
     403            0 :                 tenant_id: ttid.tenant_id.as_ref().to_owned(),
     404            0 :                 timeline_id: ttid.timeline_id.as_ref().to_owned(),
     405            0 :             }),
     406            0 :             term: self.sk.state().acceptor_state.term,
     407            0 :             last_log_term: self.sk.last_log_term(),
     408            0 :             flush_lsn: self.sk.flush_lsn().0,
     409            0 :             // note: this value is not flushed to control file yet and can be lost
     410            0 :             commit_lsn: self.sk.state().inmem.commit_lsn.0,
     411            0 :             remote_consistent_lsn: self.sk.state().inmem.remote_consistent_lsn.0,
     412            0 :             peer_horizon_lsn: self.sk.state().inmem.peer_horizon_lsn.0,
     413            0 :             safekeeper_connstr: conf
     414            0 :                 .advertise_pg_addr
     415            0 :                 .to_owned()
     416            0 :                 .unwrap_or(conf.listen_pg_addr.clone()),
     417            0 :             http_connstr: conf.listen_http_addr.to_owned(),
     418            0 :             backup_lsn: self.sk.state().inmem.backup_lsn.0,
     419            0 :             local_start_lsn: self.sk.state().local_start_lsn.0,
     420            0 :             availability_zone: conf.availability_zone.clone(),
     421            0 :             standby_horizon: standby_apply_lsn.0,
     422            0 :         }
     423            0 :     }
     424              : 
      425              :     /// Get our latest view of the status of alive peers on the timeline.
      426              :     /// We pass our own info through the broker as well, so if we have no connection
      427              :     /// to the broker, the returned vec is empty.
     428            0 :     pub(crate) fn get_peers(&self, heartbeat_timeout: Duration) -> Vec<PeerInfo> {
     429            0 :         let now = Instant::now();
     430            0 :         self.peers_info
     431            0 :             .0
     432            0 :             .iter()
     433            0 :             // Regard peer as absent if we haven't heard from it within heartbeat_timeout.
     434            0 :             .filter(|p| now.duration_since(p.ts) <= heartbeat_timeout)
     435            0 :             .cloned()
     436            0 :             .collect()
     437            0 :     }
     438              : }
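// Illustrative aside (hypothetical types, not part of this module): the
// liveness filter from get_peers() in a self-contained form. A peer counts as
// alive only if its last message arrived within heartbeat_timeout.
use std::time::{Duration, Instant};

struct Peer {
    last_seen: Instant,
}

fn alive_peers(peers: &[Peer], heartbeat_timeout: Duration) -> Vec<&Peer> {
    let now = Instant::now();
    peers
        .iter()
        // Drop peers we have not heard from recently enough.
        .filter(|p| now.duration_since(p.last_seen) <= heartbeat_timeout)
        .collect()
}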
     439              : 
     440            0 : #[derive(Debug, thiserror::Error)]
     441              : pub enum TimelineError {
     442              :     #[error("Timeline {0} was cancelled and cannot be used anymore")]
     443              :     Cancelled(TenantTimelineId),
     444              :     #[error("Timeline {0} was not found in global map")]
     445              :     NotFound(TenantTimelineId),
     446              :     #[error("Timeline {0} exists on disk, but wasn't loaded on startup")]
     447              :     Invalid(TenantTimelineId),
      448              :     #[error("Timeline {0} already exists")]
     449              :     AlreadyExists(TenantTimelineId),
     450              :     #[error("Timeline {0} is not initialized, wal_seg_size is zero")]
     451              :     UninitializedWalSegSize(TenantTimelineId),
     452              :     #[error("Timeline {0} is not initialized, pg_version is unknown")]
     453              :     UninitialinzedPgVersion(TenantTimelineId),
     454              : }
     455              : 
     456              : // Convert to HTTP API error.
     457              : impl From<TimelineError> for ApiError {
     458            0 :     fn from(te: TimelineError) -> ApiError {
     459            0 :         match te {
     460            0 :             TimelineError::NotFound(ttid) => {
     461            0 :                 ApiError::NotFound(anyhow!("timeline {} not found", ttid).into())
     462              :             }
     463            0 :             _ => ApiError::InternalServerError(anyhow!("{}", te)),
     464              :         }
     465            0 :     }
     466              : }
     467              : 
     468              : /// Timeline struct manages lifecycle (creation, deletion, restore) of a safekeeper timeline.
     469              : /// It also holds SharedState and provides mutually exclusive access to it.
     470              : pub struct Timeline {
     471              :     pub ttid: TenantTimelineId,
     472              : 
     473              :     /// Used to broadcast commit_lsn updates to all background jobs.
     474              :     commit_lsn_watch_tx: watch::Sender<Lsn>,
     475              :     commit_lsn_watch_rx: watch::Receiver<Lsn>,
     476              : 
      477              :     /// Broadcasts (current term, flush_lsn) updates; the walsender is interested in
      478              :     /// them when sending in recovery mode (to walproposer or peers). Note: this
      479              :     /// is just a notification; WAL reading should always be done with the lock held,
      480              :     /// as the term can change otherwise.
     481              :     term_flush_lsn_watch_tx: watch::Sender<TermLsn>,
     482              :     term_flush_lsn_watch_rx: watch::Receiver<TermLsn>,
     483              : 
     484              :     /// Broadcasts shared state updates.
     485              :     shared_state_version_tx: watch::Sender<usize>,
     486              :     shared_state_version_rx: watch::Receiver<usize>,
     487              : 
      488              :     /// Safekeeper and other state that should remain consistent and
      489              :     /// synchronized with the disk. This is a tokio mutex, as we write WAL to disk
      490              :     /// while holding it, ensuring that consensus checks are in order.
     491              :     mutex: RwLock<SharedState>,
     492              :     walsenders: Arc<WalSenders>,
     493              :     walreceivers: Arc<WalReceivers>,
     494              :     timeline_dir: Utf8PathBuf,
     495              :     manager_ctl: ManagerCtl,
     496              : 
      497              :     /// Delete/cancel will trigger this; background tasks should drop out as soon as it fires.
     498              :     pub(crate) cancel: CancellationToken,
     499              : 
     500              :     // timeline_manager controlled state
     501              :     pub(crate) broker_active: AtomicBool,
     502              :     pub(crate) wal_backup_active: AtomicBool,
     503              :     pub(crate) last_removed_segno: AtomicU64,
     504              :     pub(crate) mgr_status: AtomicStatus,
     505              : }
     506              : 
     507              : impl Timeline {
     508              :     /// Load existing timeline from disk.
     509            0 :     pub fn load_timeline(conf: &SafeKeeperConf, ttid: TenantTimelineId) -> Result<Timeline> {
     510            0 :         let _enter = info_span!("load_timeline", timeline = %ttid.timeline_id).entered();
     511              : 
     512            0 :         let shared_state = SharedState::restore(conf, &ttid)?;
     513            0 :         let (commit_lsn_watch_tx, commit_lsn_watch_rx) =
     514            0 :             watch::channel(shared_state.sk.state().commit_lsn);
     515            0 :         let (term_flush_lsn_watch_tx, term_flush_lsn_watch_rx) = watch::channel(TermLsn::from((
     516            0 :             shared_state.sk.last_log_term(),
     517            0 :             shared_state.sk.flush_lsn(),
     518            0 :         )));
     519            0 :         let (shared_state_version_tx, shared_state_version_rx) = watch::channel(0);
     520            0 : 
     521            0 :         let walreceivers = WalReceivers::new();
     522            0 :         Ok(Timeline {
     523            0 :             ttid,
     524            0 :             commit_lsn_watch_tx,
     525            0 :             commit_lsn_watch_rx,
     526            0 :             term_flush_lsn_watch_tx,
     527            0 :             term_flush_lsn_watch_rx,
     528            0 :             shared_state_version_tx,
     529            0 :             shared_state_version_rx,
     530            0 :             mutex: RwLock::new(shared_state),
     531            0 :             walsenders: WalSenders::new(walreceivers.clone()),
     532            0 :             walreceivers,
     533            0 :             cancel: CancellationToken::default(),
     534            0 :             timeline_dir: get_timeline_dir(conf, &ttid),
     535            0 :             manager_ctl: ManagerCtl::new(),
     536            0 :             broker_active: AtomicBool::new(false),
     537            0 :             wal_backup_active: AtomicBool::new(false),
     538            0 :             last_removed_segno: AtomicU64::new(0),
     539            0 :             mgr_status: AtomicStatus::new(),
     540            0 :         })
     541            0 :     }
     542              : 
     543              :     /// Create a new timeline, which is not yet persisted to disk.
     544            0 :     pub fn create_empty(
     545            0 :         conf: &SafeKeeperConf,
     546            0 :         ttid: TenantTimelineId,
     547            0 :         server_info: ServerInfo,
     548            0 :         commit_lsn: Lsn,
     549            0 :         local_start_lsn: Lsn,
     550            0 :     ) -> Result<Timeline> {
     551            0 :         let (commit_lsn_watch_tx, commit_lsn_watch_rx) = watch::channel(Lsn::INVALID);
     552            0 :         let (term_flush_lsn_watch_tx, term_flush_lsn_watch_rx) =
     553            0 :             watch::channel(TermLsn::from((INVALID_TERM, Lsn::INVALID)));
     554            0 :         let (shared_state_version_tx, shared_state_version_rx) = watch::channel(0);
     555            0 : 
     556            0 :         let state =
     557            0 :             TimelinePersistentState::new(&ttid, server_info, vec![], commit_lsn, local_start_lsn);
     558            0 : 
     559            0 :         let walreceivers = WalReceivers::new();
     560            0 :         Ok(Timeline {
     561            0 :             ttid,
     562            0 :             commit_lsn_watch_tx,
     563            0 :             commit_lsn_watch_rx,
     564            0 :             term_flush_lsn_watch_tx,
     565            0 :             term_flush_lsn_watch_rx,
     566            0 :             shared_state_version_tx,
     567            0 :             shared_state_version_rx,
     568            0 :             mutex: RwLock::new(SharedState::create_new(conf, &ttid, state)?),
     569            0 :             walsenders: WalSenders::new(walreceivers.clone()),
     570            0 :             walreceivers,
     571            0 :             cancel: CancellationToken::default(),
     572            0 :             timeline_dir: get_timeline_dir(conf, &ttid),
     573            0 :             manager_ctl: ManagerCtl::new(),
     574            0 :             broker_active: AtomicBool::new(false),
     575            0 :             wal_backup_active: AtomicBool::new(false),
     576            0 :             last_removed_segno: AtomicU64::new(0),
     577            0 :             mgr_status: AtomicStatus::new(),
     578              :         })
     579            0 :     }
     580              : 
     581              :     /// Initialize fresh timeline on disk and start background tasks. If init
     582              :     /// fails, timeline is cancelled and cannot be used anymore.
     583              :     ///
     584              :     /// Init is transactional, so if it fails, created files will be deleted,
     585              :     /// and state on disk should remain unchanged.
     586            0 :     pub async fn init_new(
     587            0 :         self: &Arc<Timeline>,
     588            0 :         shared_state: &mut WriteGuardSharedState<'_>,
     589            0 :         conf: &SafeKeeperConf,
     590            0 :         broker_active_set: Arc<TimelinesSet>,
     591            0 :         partial_backup_rate_limiter: RateLimiter,
     592            0 :     ) -> Result<()> {
     593            0 :         match fs::metadata(&self.timeline_dir).await {
     594              :             Ok(_) => {
     595              :                 // Timeline directory exists on disk, we should leave state unchanged
     596              :                 // and return error.
     597            0 :                 bail!(TimelineError::Invalid(self.ttid));
     598              :             }
     599            0 :             Err(e) if e.kind() == std::io::ErrorKind::NotFound => {}
     600            0 :             Err(e) => {
     601            0 :                 return Err(e.into());
     602              :             }
     603              :         }
     604              : 
     605              :         // Create timeline directory.
     606            0 :         fs::create_dir_all(&self.timeline_dir).await?;
     607              : 
     608              :         // Write timeline to disk and start background tasks.
     609            0 :         if let Err(e) = shared_state.sk.state_mut().flush().await {
     610              :             // Bootstrap failed, cancel timeline and remove timeline directory.
     611            0 :             self.cancel(shared_state);
     612              : 
     613            0 :             if let Err(fs_err) = fs::remove_dir_all(&self.timeline_dir).await {
     614            0 :                 warn!(
     615            0 :                     "failed to remove timeline {} directory after bootstrap failure: {}",
     616            0 :                     self.ttid, fs_err
     617              :                 );
     618            0 :             }
     619              : 
     620            0 :             return Err(e);
     621            0 :         }
     622            0 :         self.bootstrap(conf, broker_active_set, partial_backup_rate_limiter);
     623            0 :         Ok(())
     624            0 :     }
     625              : 
     626              :     /// Bootstrap new or existing timeline starting background tasks.
     627            0 :     pub fn bootstrap(
     628            0 :         self: &Arc<Timeline>,
     629            0 :         conf: &SafeKeeperConf,
     630            0 :         broker_active_set: Arc<TimelinesSet>,
     631            0 :         partial_backup_rate_limiter: RateLimiter,
     632            0 :     ) {
     633            0 :         let (tx, rx) = self.manager_ctl.bootstrap_manager();
     634            0 : 
     635            0 :         // Start manager task which will monitor timeline state and update
     636            0 :         // background tasks.
     637            0 :         tokio::spawn(timeline_manager::main_task(
     638            0 :             ManagerTimeline { tli: self.clone() },
     639            0 :             conf.clone(),
     640            0 :             broker_active_set,
     641            0 :             tx,
     642            0 :             rx,
     643            0 :             partial_backup_rate_limiter,
     644            0 :         ));
     645            0 :     }
     646              : 
      647              :     /// Delete the timeline from disk completely by removing the timeline directory.
      648              :     /// Background timeline activities will stop eventually.
      649              :     ///
      650              :     /// Also deletes WAL in s3. Might fail if e.g. s3 is unavailable, but the
      651              :     /// deletion API endpoint is retriable.
     652            0 :     pub async fn delete(
     653            0 :         &self,
     654            0 :         shared_state: &mut WriteGuardSharedState<'_>,
     655            0 :         only_local: bool,
     656            0 :     ) -> Result<bool> {
     657            0 :         self.cancel(shared_state);
     658            0 : 
     659            0 :         // TODO: It's better to wait for s3 offloader termination before
      660            0 :         // removing data from s3. Though since s3 doesn't have transactions, it
      661            0 :         // still wouldn't guarantee the absence of data after removal.
     662            0 :         let conf = GlobalTimelines::get_global_config();
     663            0 :         if !only_local && conf.is_wal_backup_enabled() {
     664              :             // Note: we concurrently delete remote storage data from multiple
     665              :             // safekeepers. That's ok, s3 replies 200 if object doesn't exist and we
     666              :             // do some retries anyway.
     667            0 :             wal_backup::delete_timeline(&self.ttid).await?;
     668            0 :         }
     669            0 :         let dir_existed = delete_dir(&self.timeline_dir).await?;
     670            0 :         Ok(dir_existed)
     671            0 :     }
     672              : 
     673              :     /// Cancel timeline to prevent further usage. Background tasks will stop
     674              :     /// eventually after receiving cancellation signal.
     675            0 :     fn cancel(&self, shared_state: &mut WriteGuardSharedState<'_>) {
     676            0 :         info!("timeline {} is cancelled", self.ttid);
     677            0 :         self.cancel.cancel();
     678            0 :         // Close associated FDs. Nobody will be able to touch timeline data once
     679            0 :         // it is cancelled, so WAL storage won't be opened again.
     680            0 :         shared_state.sk.close_wal_store();
     681            0 :     }
     682              : 
      683              :     /// Returns whether the timeline is cancelled.
     684            0 :     pub fn is_cancelled(&self) -> bool {
     685            0 :         self.cancel.is_cancelled()
     686            0 :     }
     687              : 
      688              :     /// Take a mutually exclusive write lock on the timeline shared_state.
     689            0 :     pub async fn write_shared_state<'a>(self: &'a Arc<Self>) -> WriteGuardSharedState<'a> {
     690            0 :         WriteGuardSharedState::new(self.clone(), self.mutex.write().await)
     691            0 :     }
     692              : 
     693            0 :     pub async fn read_shared_state(&self) -> ReadGuardSharedState {
     694            0 :         self.mutex.read().await
     695            0 :     }
     696              : 
     697              :     /// Returns commit_lsn watch channel.
     698            0 :     pub fn get_commit_lsn_watch_rx(&self) -> watch::Receiver<Lsn> {
     699            0 :         self.commit_lsn_watch_rx.clone()
     700            0 :     }
     701              : 
     702              :     /// Returns term_flush_lsn watch channel.
     703            0 :     pub fn get_term_flush_lsn_watch_rx(&self) -> watch::Receiver<TermLsn> {
     704            0 :         self.term_flush_lsn_watch_rx.clone()
     705            0 :     }
     706              : 
     707              :     /// Returns watch channel for SharedState update version.
     708            0 :     pub fn get_state_version_rx(&self) -> watch::Receiver<usize> {
     709            0 :         self.shared_state_version_rx.clone()
     710            0 :     }
     711              : 
     712              :     /// Returns wal_seg_size.
     713            0 :     pub async fn get_wal_seg_size(&self) -> usize {
     714            0 :         self.read_shared_state().await.get_wal_seg_size()
     715            0 :     }
     716              : 
     717              :     /// Returns state of the timeline.
     718            0 :     pub async fn get_state(&self) -> (TimelineMemState, TimelinePersistentState) {
     719            0 :         let state = self.read_shared_state().await;
     720            0 :         (
     721            0 :             state.sk.state().inmem.clone(),
     722            0 :             TimelinePersistentState::clone(state.sk.state()),
     723            0 :         )
     724            0 :     }
     725              : 
     726              :     /// Returns latest backup_lsn.
     727            0 :     pub async fn get_wal_backup_lsn(&self) -> Lsn {
     728            0 :         self.read_shared_state().await.sk.state().inmem.backup_lsn
     729            0 :     }
     730              : 
     731              :     /// Sets backup_lsn to the given value.
     732            0 :     pub async fn set_wal_backup_lsn(self: &Arc<Self>, backup_lsn: Lsn) -> Result<()> {
     733            0 :         if self.is_cancelled() {
     734            0 :             bail!(TimelineError::Cancelled(self.ttid));
     735            0 :         }
     736              : 
     737            0 :         let mut state = self.write_shared_state().await;
     738            0 :         state.sk.state_mut().inmem.backup_lsn = max(state.sk.state().inmem.backup_lsn, backup_lsn);
     739            0 :         // we should check whether to shut down offloader, but this will be done
     740            0 :         // soon by peer communication anyway.
     741            0 :         Ok(())
     742            0 :     }
     743              : 
     744              :     /// Get safekeeper info for broadcasting to broker and other peers.
     745            0 :     pub async fn get_safekeeper_info(&self, conf: &SafeKeeperConf) -> SafekeeperTimelineInfo {
     746            0 :         let standby_apply_lsn = self.walsenders.get_hotstandby().reply.apply_lsn;
     747            0 :         let shared_state = self.read_shared_state().await;
     748            0 :         shared_state.get_safekeeper_info(&self.ttid, conf, standby_apply_lsn)
     749            0 :     }
     750              : 
     751              :     /// Update timeline state with peer safekeeper data.
     752            0 :     pub async fn record_safekeeper_info(
     753            0 :         self: &Arc<Self>,
     754            0 :         sk_info: SafekeeperTimelineInfo,
     755            0 :     ) -> Result<()> {
     756              :         {
     757            0 :             let mut shared_state = self.write_shared_state().await;
     758            0 :             shared_state.sk.record_safekeeper_info(&sk_info).await?;
     759            0 :             let peer_info = PeerInfo::from_sk_info(&sk_info, Instant::now());
     760            0 :             shared_state.peers_info.upsert(&peer_info);
     761            0 :         }
     762            0 :         Ok(())
     763            0 :     }
     764              : 
     765            0 :     pub async fn get_peers(&self, conf: &SafeKeeperConf) -> Vec<PeerInfo> {
     766            0 :         let shared_state = self.read_shared_state().await;
     767            0 :         shared_state.get_peers(conf.heartbeat_timeout)
     768            0 :     }
     769              : 
     770            0 :     pub fn get_walsenders(&self) -> &Arc<WalSenders> {
     771            0 :         &self.walsenders
     772            0 :     }
     773              : 
     774            0 :     pub fn get_walreceivers(&self) -> &Arc<WalReceivers> {
     775            0 :         &self.walreceivers
     776            0 :     }
     777              : 
     778              :     /// Returns flush_lsn.
     779            0 :     pub async fn get_flush_lsn(&self) -> Lsn {
     780            0 :         self.read_shared_state().await.sk.flush_lsn()
     781            0 :     }
     782              : 
     783              :     /// Gather timeline data for metrics.
     784            0 :     pub async fn info_for_metrics(&self) -> Option<FullTimelineInfo> {
     785            0 :         if self.is_cancelled() {
     786            0 :             return None;
     787            0 :         }
     788            0 : 
     789            0 :         let (ps_feedback_count, last_ps_feedback) = self.walsenders.get_ps_feedback_stats();
     790            0 :         let state = self.read_shared_state().await;
     791            0 :         Some(FullTimelineInfo {
     792            0 :             ttid: self.ttid,
     793            0 :             ps_feedback_count,
     794            0 :             last_ps_feedback,
     795            0 :             wal_backup_active: self.wal_backup_active.load(Ordering::Relaxed),
     796            0 :             timeline_is_active: self.broker_active.load(Ordering::Relaxed),
     797            0 :             num_computes: self.walreceivers.get_num() as u32,
     798            0 :             last_removed_segno: self.last_removed_segno.load(Ordering::Relaxed),
     799            0 :             epoch_start_lsn: state.sk.term_start_lsn(),
     800            0 :             mem_state: state.sk.state().inmem.clone(),
     801            0 :             persisted_state: TimelinePersistentState::clone(state.sk.state()),
     802            0 :             flush_lsn: state.sk.flush_lsn(),
     803            0 :             wal_storage: state.sk.wal_storage_metrics(),
     804            0 :         })
     805            0 :     }
     806              : 
     807              :     /// Returns in-memory timeline state to build a full debug dump.
     808            0 :     pub async fn memory_dump(&self) -> debug_dump::Memory {
     809            0 :         let state = self.read_shared_state().await;
     810              : 
     811            0 :         let (write_lsn, write_record_lsn, flush_lsn, file_open) =
     812            0 :             state.sk.wal_storage_internal_state();
     813            0 : 
     814            0 :         debug_dump::Memory {
     815            0 :             is_cancelled: self.is_cancelled(),
     816            0 :             peers_info_len: state.peers_info.0.len(),
     817            0 :             walsenders: self.walsenders.get_all(),
     818            0 :             wal_backup_active: self.wal_backup_active.load(Ordering::Relaxed),
     819            0 :             active: self.broker_active.load(Ordering::Relaxed),
     820            0 :             num_computes: self.walreceivers.get_num() as u32,
     821            0 :             last_removed_segno: self.last_removed_segno.load(Ordering::Relaxed),
     822            0 :             epoch_start_lsn: state.sk.term_start_lsn(),
     823            0 :             mem_state: state.sk.state().inmem.clone(),
     824            0 :             mgr_status: self.mgr_status.get(),
     825            0 :             write_lsn,
     826            0 :             write_record_lsn,
     827            0 :             flush_lsn,
     828            0 :             file_open,
     829            0 :         }
     830            0 :     }
     831              : 
     832              :     /// Apply a function to the control file state and persist it.
     833            0 :     pub async fn map_control_file<T>(
     834            0 :         self: &Arc<Self>,
     835            0 :         f: impl FnOnce(&mut TimelinePersistentState) -> Result<T>,
     836            0 :     ) -> Result<T> {
     837            0 :         let mut state = self.write_shared_state().await;
     838            0 :         let mut persistent_state = state.sk.state_mut().start_change();
     839              :         // If f returns error, we abort the change and don't persist anything.
     840            0 :         let res = f(&mut persistent_state)?;
     841              :         // If persisting fails, we abort the change and return error.
     842            0 :         state
     843            0 :             .sk
     844            0 :             .state_mut()
     845            0 :             .finish_change(&persistent_state)
     846            0 :             .await?;
     847            0 :         Ok(res)
     848            0 :     }
     849              : 
     850              :     /// Get the timeline guard for reading/writing WAL files.
     851              :     /// If WAL files are not present on disk (evicted), they will be automatically
     852              :     /// downloaded from remote storage. This is done in the manager task, which is
     853              :     /// responsible for issuing all guards.
     854              :     ///
     855              :     /// NB: don't use this function from timeline_manager, it will deadlock.
     856              :     /// NB: don't use this function while holding shared_state lock.
     857            0 :     pub async fn wal_residence_guard(self: &Arc<Self>) -> Result<WalResidentTimeline> {
     858            0 :         if self.is_cancelled() {
     859            0 :             bail!(TimelineError::Cancelled(self.ttid));
     860            0 :         }
     861            0 : 
     862            0 :         debug!("requesting WalResidentTimeline guard");
     863            0 :         let started_at = Instant::now();
     864            0 :         let status_before = self.mgr_status.get();
     865              : 
     866              :         // Wait 30 seconds for the guard to be acquired. It can time out if someone is
     867              :         // holding the lock (e.g. during `SafeKeeper::process_msg()`) or manager task
     868              :         // is stuck.
     869            0 :         let res = tokio::time::timeout_at(
     870            0 :             started_at + Duration::from_secs(30),
     871            0 :             self.manager_ctl.wal_residence_guard(),
     872            0 :         )
     873            0 :         .await;
     874              : 
     875            0 :         let guard = match res {
     876            0 :             Ok(Ok(guard)) => {
     877            0 :                 let finished_at = Instant::now();
     878            0 :                 let elapsed = finished_at - started_at;
     879            0 :                 MISC_OPERATION_SECONDS
     880            0 :                     .with_label_values(&["wal_residence_guard"])
     881            0 :                     .observe(elapsed.as_secs_f64());
     882            0 : 
     883            0 :                 guard
     884              :             }
     885            0 :             Ok(Err(e)) => {
     886            0 :                 warn!(
     887            0 :                     "error while acquiring WalResidentTimeline guard, statuses {:?} => {:?}",
     888            0 :                     status_before,
     889            0 :                     self.mgr_status.get()
     890              :                 );
     891            0 :                 return Err(e);
     892              :             }
     893              :             Err(_) => {
     894            0 :                 warn!(
     895            0 :                     "timeout while acquiring WalResidentTimeline guard, statuses {:?} => {:?}",
     896            0 :                     status_before,
     897            0 :                     self.mgr_status.get()
     898              :                 );
     899            0 :                 anyhow::bail!("timeout while acquiring WalResidentTimeline guard");
     900              :             }
     901              :         };
     902              : 
     903            0 :         Ok(WalResidentTimeline::new(self.clone(), guard))
     904            0 :     }
     905              : }
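// Illustrative aside (hypothetical helper, not part of this module): the
// bounded-wait pattern from wal_residence_guard() in isolation. The
// acquisition future is wrapped in tokio::time::timeout_at so a stuck manager
// task surfaces as an error after 30 seconds instead of hanging the caller.
use std::future::Future;
use std::time::Duration;
use tokio::time::{timeout_at, Instant};

async fn acquire_with_deadline<T>(
    acquire: impl Future<Output = anyhow::Result<T>>,
) -> anyhow::Result<T> {
    let started_at = Instant::now();
    match timeout_at(started_at + Duration::from_secs(30), acquire).await {
        // The inner future finished in time; propagate its own Ok/Err result.
        Ok(res) => res,
        // The deadline elapsed before the guard was issued.
        Err(_) => anyhow::bail!("timeout while acquiring guard"),
    }
}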
     906              : 
      907              : /// This is a guard that allows reading and writing the on-disk timeline state.
      908              : /// All tasks that read or write WAL on disk should use this guard.
     909              : pub struct WalResidentTimeline {
     910              :     pub tli: Arc<Timeline>,
     911              :     _guard: ResidenceGuard,
     912              : }
     913              : 
     914              : impl WalResidentTimeline {
     915            0 :     pub fn new(tli: Arc<Timeline>, _guard: ResidenceGuard) -> Self {
     916            0 :         WalResidentTimeline { tli, _guard }
     917            0 :     }
     918              : }
     919              : 
     920              : impl Deref for WalResidentTimeline {
     921              :     type Target = Arc<Timeline>;
     922              : 
     923            0 :     fn deref(&self) -> &Self::Target {
     924            0 :         &self.tli
     925            0 :     }
     926              : }
     927              : 
     928              : impl WalResidentTimeline {
     929              :     /// Returns true if the walsender should stop sending WAL to the pageserver.
     930              :     /// We terminate it once remote_consistent_lsn has reached commit_lsn and there
     931              :     /// are no computes attached. Even when there is nothing left to stream, we only
     932              :     /// learn about remote_consistent_lsn updates through replication feedback, and
     933              :     /// we want to stop pushing to the broker once the pageserver is fully caught up.
     934            0 :     pub async fn should_walsender_stop(&self, reported_remote_consistent_lsn: Lsn) -> bool {
     935            0 :         if self.is_cancelled() {
     936            0 :             return true;
     937            0 :         }
     938            0 :         let shared_state = self.read_shared_state().await;
     939            0 :         if self.walreceivers.get_num() == 0 {
     940            0 :             return shared_state.sk.state().inmem.commit_lsn == Lsn(0) || // no data at all yet
     941            0 :             reported_remote_consistent_lsn >= shared_state.sk.state().inmem.commit_lsn;
     942            0 :         }
     943            0 :         false
     944            0 :     }
     945              : 
     946              :     /// Ensure that the current term is `t`, returning an error otherwise, and lock the state.
     947            0 :     pub async fn acquire_term(&self, t: Term) -> Result<ReadGuardSharedState> {
     948            0 :         let ss = self.read_shared_state().await;
     949            0 :         if ss.sk.state().acceptor_state.term != t {
     950            0 :             bail!(
     951            0 :                 "failed to acquire term {}, current term {}",
     952            0 :                 t,
     953            0 :                 ss.sk.state().acceptor_state.term
     954            0 :             );
     955            0 :         }
     956            0 :         Ok(ss)
     957            0 :     }
     958              : 
     959              :     /// Pass an incoming message to the safekeeper.
     960            0 :     pub async fn process_msg(
     961            0 :         &self,
     962            0 :         msg: &ProposerAcceptorMessage,
     963            0 :     ) -> Result<Option<AcceptorProposerMessage>> {
     964            0 :         if self.is_cancelled() {
     965            0 :             bail!(TimelineError::Cancelled(self.ttid));
     966            0 :         }
     967              : 
     968              :         let mut rmsg: Option<AcceptorProposerMessage>;
     969              :         {
     970            0 :             let mut shared_state = self.write_shared_state().await;
     971            0 :             rmsg = shared_state.sk.safekeeper().process_msg(msg).await?;
     972              : 
     973              :             // if this is AppendResponse, fill in proper hot standby feedback.
     974            0 :             if let Some(AcceptorProposerMessage::AppendResponse(ref mut resp)) = rmsg {
     975            0 :                 resp.hs_feedback = self.walsenders.get_hotstandby().hs_feedback;
     976            0 :             }
     977              :         }
     978            0 :         Ok(rmsg)
     979            0 :     }
     980              : 
     981            0 :     pub async fn get_walreader(&self, start_lsn: Lsn) -> Result<WalReader> {
     982            0 :         let (_, persisted_state) = self.get_state().await;
     983            0 :         let enable_remote_read = GlobalTimelines::get_global_config().is_wal_backup_enabled();
     984            0 : 
     985            0 :         WalReader::new(
     986            0 :             &self.ttid,
     987            0 :             self.timeline_dir.clone(),
     988            0 :             &persisted_state,
     989            0 :             start_lsn,
     990            0 :             enable_remote_read,
     991            0 :         )
     992            0 :     }
     993              : 
     994            0 :     pub fn get_timeline_dir(&self) -> Utf8PathBuf {
     995            0 :         self.timeline_dir.clone()
     996            0 :     }
     997              : 
     998              :     /// Update the in-memory remote_consistent_lsn.
     999            0 :     pub async fn update_remote_consistent_lsn(&self, candidate: Lsn) {
    1000            0 :         let mut shared_state = self.write_shared_state().await;
    1001            0 :         shared_state.sk.state_mut().inmem.remote_consistent_lsn = max(
    1002            0 :             shared_state.sk.state().inmem.remote_consistent_lsn,
    1003            0 :             candidate,
    1004            0 :         );
    1005            0 :     }
    1006              : }
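
// Illustrative sketch (not part of timeline.rs): update_remote_consistent_lsn above only
// ever advances the in-memory watermark, using max() so stale or duplicate feedback can
// never move it backwards. The same idea in isolation, with u64 standing in for Lsn.

use std::cmp::max;

fn advance(current: &mut u64, candidate: u64) {
    // A candidate smaller than the current value is simply ignored.
    *current = max(*current, candidate);
}

fn main() {
    let mut remote_consistent = 100u64;
    advance(&mut remote_consistent, 90);  // stale report: ignored
    advance(&mut remote_consistent, 150); // newer report: accepted
    assert_eq!(remote_consistent, 150);
}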
    1007              : 
    1008              : /// This struct contains methods that are used by the timeline manager task.
    1009              : pub(crate) struct ManagerTimeline {
    1010              :     pub(crate) tli: Arc<Timeline>,
    1011              : }
    1012              : 
    1013              : impl Deref for ManagerTimeline {
    1014              :     type Target = Arc<Timeline>;
    1015              : 
    1016            0 :     fn deref(&self) -> &Self::Target {
    1017            0 :         &self.tli
    1018            0 :     }
    1019              : }
    1020              : 
    1021              : impl ManagerTimeline {
    1022            0 :     pub(crate) fn timeline_dir(&self) -> &Utf8PathBuf {
    1023            0 :         &self.tli.timeline_dir
    1024            0 :     }
    1025              : 
    1026              :     /// Manager requests this state on startup.
    1027            0 :     pub(crate) async fn bootstrap_mgr(&self) -> (bool, Option<PartialRemoteSegment>) {
    1028            0 :         let shared_state = self.read_shared_state().await;
    1029            0 :         let is_offloaded = matches!(
    1030            0 :             shared_state.sk.state().eviction_state,
    1031              :             EvictionState::Offloaded(_)
    1032              :         );
    1033            0 :         let partial_backup_uploaded = shared_state.sk.state().partial_backup.uploaded_segment();
    1034            0 : 
    1035            0 :         (is_offloaded, partial_backup_uploaded)
    1036            0 :     }
    1037              : 
    1038              :     /// Try to switch state Present->Offloaded.
    1039            0 :     pub(crate) async fn switch_to_offloaded(
    1040            0 :         &self,
    1041            0 :         partial: &PartialRemoteSegment,
    1042            0 :     ) -> anyhow::Result<()> {
    1043            0 :         let mut shared = self.write_shared_state().await;
    1044              : 
    1045              :         // updating control file
    1046            0 :         let mut pstate = shared.sk.state_mut().start_change();
    1047              : 
    1048            0 :         if !matches!(pstate.eviction_state, EvictionState::Present) {
    1049            0 :             bail!(
    1050            0 :                 "cannot switch to offloaded state, current state is {:?}",
    1051            0 :                 pstate.eviction_state
    1052            0 :             );
    1053            0 :         }
    1054            0 : 
    1055            0 :         if partial.flush_lsn != shared.sk.flush_lsn() {
    1056            0 :             bail!(
    1057            0 :                 "flush_lsn mismatch in partial backup, expected {}, got {}",
    1058            0 :                 shared.sk.flush_lsn(),
    1059            0 :                 partial.flush_lsn
    1060            0 :             );
    1061            0 :         }
    1062            0 : 
    1063            0 :         if partial.commit_lsn != pstate.commit_lsn {
    1064            0 :             bail!(
    1065            0 :                 "commit_lsn mismatch in partial backup, expected {}, got {}",
    1066            0 :                 pstate.commit_lsn,
    1067            0 :                 partial.commit_lsn
    1068            0 :             );
    1069            0 :         }
    1070            0 : 
    1071            0 :         if partial.term != shared.sk.last_log_term() {
    1072            0 :             bail!(
    1073            0 :                 "term mismatch in partial backup, expected {}, got {}",
    1074            0 :                 shared.sk.last_log_term(),
    1075            0 :                 partial.term
    1076            0 :             );
    1077            0 :         }
    1078            0 : 
    1079            0 :         pstate.eviction_state = EvictionState::Offloaded(shared.sk.flush_lsn());
    1080            0 :         shared.sk.state_mut().finish_change(&pstate).await?;
    1081              :         // control file is now switched to Offloaded state
    1082              : 
    1083              :         // now we can switch shared.sk to Offloaded, shouldn't fail
    1084            0 :         let prev_sk = std::mem::replace(&mut shared.sk, StateSK::Empty);
    1085            0 :         let cfile_state = prev_sk.take_state();
    1086            0 :         shared.sk = StateSK::Offloaded(Box::new(cfile_state));
    1087            0 : 
    1088            0 :         Ok(())
    1089            0 :     }
    1090              : 
    1091              :     /// Try to switch state Offloaded->Present.
    1092            0 :     pub(crate) async fn switch_to_present(&self) -> anyhow::Result<()> {
    1093            0 :         let conf = GlobalTimelines::get_global_config();
    1094            0 :         let mut shared = self.write_shared_state().await;
    1095              : 
    1096              :         // trying to restore WAL storage
    1097            0 :         let wal_store = wal_storage::PhysicalStorage::new(
    1098            0 :             &self.ttid,
    1099            0 :             self.timeline_dir.clone(),
    1100            0 :             &conf,
    1101            0 :             shared.sk.state(),
    1102            0 :         )?;
    1103              : 
    1104              :         // updating control file
    1105            0 :         let mut pstate = shared.sk.state_mut().start_change();
    1106              : 
    1107            0 :         if !matches!(pstate.eviction_state, EvictionState::Offloaded(_)) {
    1108            0 :             bail!(
    1109            0 :                 "cannot switch to present state, current state is {:?}",
    1110            0 :                 pstate.eviction_state
    1111            0 :             );
    1112            0 :         }
    1113            0 : 
    1114            0 :         if wal_store.flush_lsn() != shared.sk.flush_lsn() {
    1115            0 :             bail!(
    1116            0 :                 "flush_lsn mismatch in restored WAL, expected {}, got {}",
    1117            0 :                 shared.sk.flush_lsn(),
    1118            0 :                 wal_store.flush_lsn()
    1119            0 :             );
    1120            0 :         }
    1121            0 : 
    1122            0 :         pstate.eviction_state = EvictionState::Present;
    1123            0 :         shared.sk.state_mut().finish_change(&pstate).await?;
    1124              : 
    1125              :         // now we can switch shared.sk to Present, shouldn't fail
    1126            0 :         let prev_sk = std::mem::replace(&mut shared.sk, StateSK::Empty);
    1127            0 :         let cfile_state = prev_sk.take_state();
    1128            0 :         shared.sk = StateSK::Loaded(SafeKeeper::new(cfile_state, wal_store, conf.my_id)?);
    1129              : 
    1130            0 :         Ok(())
    1131            0 :     }
    1132              : 
    1133              :     /// Update the current manager state; useful for debugging manager deadlocks.
    1134            0 :     pub(crate) fn set_status(&self, status: timeline_manager::Status) {
    1135            0 :         self.mgr_status.store(status, Ordering::Relaxed);
    1136            0 :     }
    1137              : }
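
// Illustrative sketch (not part of timeline.rs): switch_to_offloaded and switch_to_present
// above both move the old state machine out of the shared slot with std::mem::replace
// before installing the new variant. The same idiom with a toy enum; State and offload
// are hypothetical names, not the real StateSK.

enum State {
    Empty,
    Loaded(String),
    Offloaded(String),
}

fn offload(slot: &mut State) {
    // Park Empty in the slot so the previous value can be moved out by value.
    let prev = std::mem::replace(slot, State::Empty);
    *slot = match prev {
        State::Loaded(payload) => State::Offloaded(payload),
        other => other, // already offloaded or empty: keep as-is
    };
}

fn main() {
    let mut sk = State::Loaded("control file state".to_string());
    offload(&mut sk);
    assert!(matches!(sk, State::Offloaded(_)));
}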
    1138              : 
    1139              : /// Deletes a directory and its contents. Returns false if the directory does not exist.
    1140            0 : async fn delete_dir(path: &Utf8PathBuf) -> Result<bool> {
    1141            0 :     match fs::remove_dir_all(path).await {
    1142            0 :         Ok(_) => Ok(true),
    1143            0 :         Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(false),
    1144            0 :         Err(e) => Err(e.into()),
    1145              :     }
    1146            0 : }
    1147              : 
    1148              : /// Get a path to the tenant directory. If you just need to get a timeline directory,
    1149              : /// use WalResidentTimeline::get_timeline_dir instead.
    1150           14 : pub(crate) fn get_tenant_dir(conf: &SafeKeeperConf, tenant_id: &TenantId) -> Utf8PathBuf {
    1151           14 :     conf.workdir.join(tenant_id.to_string())
    1152           14 : }
    1153              : 
    1154              : /// Get a path to the timeline directory. If you need to read WAL files from disk,
    1155              : /// use WalResidentTimeline::get_timeline_dir instead. This function does not check
    1156              : /// the timeline's eviction status, so WAL files might not be present on disk.
    1157           14 : pub(crate) fn get_timeline_dir(conf: &SafeKeeperConf, ttid: &TenantTimelineId) -> Utf8PathBuf {
    1158           14 :     get_tenant_dir(conf, &ttid.tenant_id).join(ttid.timeline_id.to_string())
    1159           14 : }
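
// Illustrative sketch (not part of timeline.rs): the directory layout these helpers
// produce is <workdir>/<tenant_id>/<timeline_id>. The workdir and hex ids below are
// hypothetical placeholder values.

use camino::Utf8PathBuf;

fn main() {
    let workdir = Utf8PathBuf::from("/data/safekeeper");
    let tenant_id = "0123456789abcdef0123456789abcdef";
    let timeline_id = "fedcba9876543210fedcba9876543210";

    // get_tenant_dir: <workdir>/<tenant_id>
    let tenant_dir = workdir.join(tenant_id);
    // get_timeline_dir: <workdir>/<tenant_id>/<timeline_id>
    let timeline_dir = tenant_dir.join(timeline_id);

    assert_eq!(
        timeline_dir.as_str(),
        "/data/safekeeper/0123456789abcdef0123456789abcdef/fedcba9876543210fedcba9876543210"
    );
}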
        

Generated by: LCOV version 2.1-beta