LCOV - code coverage report
Current view: top level - safekeeper/src - timelines_global_map.rs (source / functions)
Test: 12c2fc96834f59604b8ade5b9add28f1dce41ec6.info    Test date: 2024-07-03 15:33:13
Coverage: lines 0.0 % (0 of 268 hit), functions 0.0 % (0 of 30 hit)

Source code:
//! This module contains the global `(tenant_id, timeline_id)` -> `Arc<Timeline>` mapping.
//! All timelines should always be present in this map; this is ensured by loading them
//! all from disk on startup and keeping them in memory.
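//!
//! Illustrative usage sketch (not compiled as a doctest; `conf` and `ttid` are
//! placeholders constructed elsewhere):
//!
//! ```ignore
//! // At startup: inject the config and load all timelines found on disk.
//! GlobalTimelines::init(conf).await?;
//!
//! // Later, per request: look up a timeline by (tenant_id, timeline_id).
//! let tli = GlobalTimelines::get(ttid)?;
//! ```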

use crate::safekeeper::ServerInfo;
use crate::timeline::{get_tenant_dir, get_timeline_dir, Timeline, TimelineError};
use crate::timelines_set::TimelinesSet;
use crate::wal_backup_partial::RateLimiter;
use crate::SafeKeeperConf;
use anyhow::{bail, Context, Result};
use camino::Utf8PathBuf;
use once_cell::sync::Lazy;
use serde::Serialize;
use std::collections::HashMap;
use std::str::FromStr;
use std::sync::atomic::Ordering;
use std::sync::{Arc, Mutex};
use tracing::*;
use utils::id::{TenantId, TenantTimelineId, TimelineId};
use utils::lsn::Lsn;

struct GlobalTimelinesState {
    timelines: HashMap<TenantTimelineId, Arc<Timeline>>,
    conf: Option<SafeKeeperConf>,
    broker_active_set: Arc<TimelinesSet>,
    load_lock: Arc<tokio::sync::Mutex<TimelineLoadLock>>,
    partial_backup_rate_limiter: RateLimiter,
}

// Used to prevent concurrent timeline loading.
pub struct TimelineLoadLock;

impl GlobalTimelinesState {
    /// Get configuration, which must be set once during init.
    fn get_conf(&self) -> &SafeKeeperConf {
        self.conf
            .as_ref()
            .expect("GlobalTimelinesState conf is not initialized")
    }

    /// Get dependencies for a timeline constructor.
    fn get_dependencies(&self) -> (SafeKeeperConf, Arc<TimelinesSet>, RateLimiter) {
        (
            self.get_conf().clone(),
            self.broker_active_set.clone(),
            self.partial_backup_rate_limiter.clone(),
        )
    }

    /// Insert timeline into the map. Returns error if timeline with the same id already exists.
    fn try_insert(&mut self, timeline: Arc<Timeline>) -> Result<()> {
        let ttid = timeline.ttid;
        if self.timelines.contains_key(&ttid) {
            bail!(TimelineError::AlreadyExists(ttid));
        }
        self.timelines.insert(ttid, timeline);
        Ok(())
    }

    /// Get timeline from the map. Returns error if timeline doesn't exist.
    fn get(&self, ttid: &TenantTimelineId) -> Result<Arc<Timeline>, TimelineError> {
        self.timelines
            .get(ttid)
            .cloned()
            .ok_or(TimelineError::NotFound(*ttid))
    }
}

static TIMELINES_STATE: Lazy<Mutex<GlobalTimelinesState>> = Lazy::new(|| {
    Mutex::new(GlobalTimelinesState {
        timelines: HashMap::new(),
        conf: None,
        broker_active_set: Arc::new(TimelinesSet::default()),
        load_lock: Arc::new(tokio::sync::Mutex::new(TimelineLoadLock)),
        partial_backup_rate_limiter: RateLimiter::new(1),
    })
});
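
// Note on lock discipline (a descriptive observation about the code below, not
// something enforced by the compiler): `TIMELINES_STATE` is a synchronous
// `std::sync::Mutex`, so its guard must not be held across an `.await`. The
// functions below therefore copy what they need while holding the lock and drop
// the guard before doing any async work, e.g.:
//
//     let deps = TIMELINES_STATE.lock().unwrap().get_dependencies();
//     // guard is dropped here; only now is it safe to .await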

/// A zero-sized struct used to manage access to the global timelines map.
pub struct GlobalTimelines;

impl GlobalTimelines {
    /// Inject dependencies needed by the timeline constructors and load all timelines into memory.
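    ///
    /// A minimal sketch of the expected startup call (illustrative; `load_config`
    /// is a hypothetical helper, and the block is marked `ignore` so it is not doctested):
    ///
    /// ```ignore
    /// let conf: SafeKeeperConf = load_config()?; // hypothetical
    /// GlobalTimelines::init(conf).await?;
    /// info!("loaded {} timelines", GlobalTimelines::timelines_count());
    /// ```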
    pub async fn init(conf: SafeKeeperConf) -> Result<()> {
        // clippy isn't smart enough to understand that drop(state) releases the
        // lock, so use an explicit block
        let tenants_dir = {
            let mut state = TIMELINES_STATE.lock().unwrap();
            state.partial_backup_rate_limiter = RateLimiter::new(conf.partial_backup_concurrency);
            state.conf = Some(conf);

            // Iterate through all directories and load tenants from every
            // directory whose name is a valid tenant_id.
            state.get_conf().workdir.clone()
        };
        let mut tenant_count = 0;
        for tenants_dir_entry in std::fs::read_dir(&tenants_dir)
            .with_context(|| format!("failed to list tenants dir {}", tenants_dir))?
        {
            match &tenants_dir_entry {
                Ok(tenants_dir_entry) => {
                    if let Ok(tenant_id) =
                        TenantId::from_str(tenants_dir_entry.file_name().to_str().unwrap_or(""))
                    {
                        tenant_count += 1;
                        GlobalTimelines::load_tenant_timelines(tenant_id).await?;
                    }
                }
                Err(e) => error!(
                    "failed to list tenants dir entry {:?} in directory {}, reason: {:?}",
                    tenants_dir_entry, tenants_dir, e
                ),
            }
        }

        info!(
            "found {} tenants directories, successfully loaded {} timelines",
            tenant_count,
            TIMELINES_STATE.lock().unwrap().timelines.len()
        );
        Ok(())
    }

    /// Loads all timelines for the given tenant into memory. Returns `fs::read_dir`
    /// errors, if any.
    ///
    /// It is async for the sake of update_status_notify. Since the TIMELINES_STATE
    /// lock is synchronous and there is no compelling reason to make it async (it is
    /// always held only briefly), we simply lock and unlock it for each timeline --
    /// this function is called during init when nothing else is running, so this is
    /// fine.
    async fn load_tenant_timelines(tenant_id: TenantId) -> Result<()> {
        let (conf, broker_active_set, partial_backup_rate_limiter) = {
            let state = TIMELINES_STATE.lock().unwrap();
            state.get_dependencies()
        };

        let timelines_dir = get_tenant_dir(&conf, &tenant_id);
        for timelines_dir_entry in std::fs::read_dir(&timelines_dir)
            .with_context(|| format!("failed to list timelines dir {}", timelines_dir))?
        {
            match &timelines_dir_entry {
                Ok(timeline_dir_entry) => {
                    if let Ok(timeline_id) =
                        TimelineId::from_str(timeline_dir_entry.file_name().to_str().unwrap_or(""))
                    {
                        let ttid = TenantTimelineId::new(tenant_id, timeline_id);
                        match Timeline::load_timeline(&conf, ttid) {
                            Ok(timeline) => {
                                let tli = Arc::new(timeline);
                                TIMELINES_STATE
                                    .lock()
                                    .unwrap()
                                    .timelines
                                    .insert(ttid, tli.clone());
                                tli.bootstrap(
                                    &conf,
                                    broker_active_set.clone(),
                                    partial_backup_rate_limiter.clone(),
                                );
                            }
                            // If we can't load a timeline, it's most likely because of a corrupted
                            // directory. We log an error and don't allow deleting or recreating
                            // this timeline. The only way to fix it is to repair the directory
                            // manually and restart the safekeeper.
                            Err(e) => error!(
                                "failed to load timeline {} for tenant {}, reason: {:?}",
                                timeline_id, tenant_id, e
                            ),
                        }
                    }
                }
                Err(e) => error!(
                    "failed to list timelines dir entry {:?} in directory {}, reason: {:?}",
                    timelines_dir_entry, timelines_dir, e
                ),
            }
        }

        Ok(())
    }

    /// Take the lock used for timeline loading.
    pub async fn loading_lock() -> Arc<tokio::sync::Mutex<TimelineLoadLock>> {
        TIMELINES_STATE.lock().unwrap().load_lock.clone()
    }

    /// Load a timeline from disk into memory.
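    ///
    /// Sketch of the intended calling pattern (illustrative only): take the
    /// loading lock first so concurrent loads of the same timeline are excluded.
    ///
    /// ```ignore
    /// let lock = GlobalTimelines::loading_lock().await;
    /// let guard = lock.lock().await;
    /// let tli = GlobalTimelines::load_timeline(&guard, ttid).await?;
    /// ```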
    pub async fn load_timeline<'a>(
        _guard: &tokio::sync::MutexGuard<'a, TimelineLoadLock>,
        ttid: TenantTimelineId,
    ) -> Result<Arc<Timeline>> {
        let (conf, broker_active_set, partial_backup_rate_limiter) =
            TIMELINES_STATE.lock().unwrap().get_dependencies();

        match Timeline::load_timeline(&conf, ttid) {
            Ok(timeline) => {
                let tli = Arc::new(timeline);

                // TODO: prevent concurrent timeline creation/loading
                TIMELINES_STATE
                    .lock()
                    .unwrap()
                    .timelines
                    .insert(ttid, tli.clone());

                tli.bootstrap(&conf, broker_active_set, partial_backup_rate_limiter);

                Ok(tli)
            }
            // If we can't load a timeline, it's bad. The caller will figure it out.
            Err(e) => bail!("failed to load timeline {}, reason: {:?}", ttid, e),
        }
    }

    /// Get the number of timelines in the map.
    pub fn timelines_count() -> usize {
        TIMELINES_STATE.lock().unwrap().timelines.len()
    }

    /// Get the global safekeeper config.
    pub fn get_global_config() -> SafeKeeperConf {
        TIMELINES_STATE.lock().unwrap().get_conf().clone()
    }

    pub fn get_global_broker_active_set() -> Arc<TimelinesSet> {
        TIMELINES_STATE.lock().unwrap().broker_active_set.clone()
    }

    /// Create a new timeline with the given id. If the timeline already exists,
    /// returns the existing timeline.
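    ///
    /// Illustrative call (sketch; `server_info`, `commit_lsn` and `local_start_lsn`
    /// come from the compute connection in real code and are placeholders here):
    ///
    /// ```ignore
    /// let tli = GlobalTimelines::create(ttid, server_info, commit_lsn, local_start_lsn).await?;
    /// ```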
    pub async fn create(
        ttid: TenantTimelineId,
        server_info: ServerInfo,
        commit_lsn: Lsn,
        local_start_lsn: Lsn,
    ) -> Result<Arc<Timeline>> {
        let (conf, broker_active_set, partial_backup_rate_limiter) = {
            let state = TIMELINES_STATE.lock().unwrap();
            if let Ok(timeline) = state.get(&ttid) {
                // Timeline already exists, return it.
                return Ok(timeline);
            }
            state.get_dependencies()
        };

        info!("creating new timeline {}", ttid);

        let timeline = Arc::new(Timeline::create_empty(
            &conf,
            ttid,
            server_info,
            commit_lsn,
            local_start_lsn,
        )?);

        // Take a lock and finish the initialization holding this mutex. No other threads
        // can interfere with creation once we have inserted the timeline into the map.
        {
            let mut shared_state = timeline.write_shared_state().await;

            // We can get a race condition here in case of concurrent create calls, but only
            // in theory. create() will return a valid timeline on the next try.
            TIMELINES_STATE
                .lock()
                .unwrap()
                .try_insert(timeline.clone())?;

            // Write the new timeline to disk and start background workers.
            // Bootstrap is transactional, so if it fails, the timeline will be deleted,
            // and the state on disk should remain unchanged.
            if let Err(e) = timeline
                .init_new(
                    &mut shared_state,
                    &conf,
                    broker_active_set,
                    partial_backup_rate_limiter,
                )
                .await
            {
                // Note: the most likely reason for init failure is that the timeline
                // directory already exists on disk. This happens when the timeline is
                // corrupted and therefore wasn't loaded from disk on startup. We want to
                // preserve the timeline directory in this case, for further inspection.

                // TODO: this is an unusual error, perhaps we should send it to sentry
                // TODO: compute will try to create the timeline every second, we should add backoff
                error!("failed to init new timeline {}: {}", ttid, e);

                // Timeline failed to init, so it cannot be used. Remove it from the map.
                TIMELINES_STATE.lock().unwrap().timelines.remove(&ttid);
                return Err(e);
            }
            // We are done with bootstrap: release the lock and return the timeline.
            // The {} block forces the release before the next .await.
        }
        Ok(timeline)
    }

    /// Get a timeline from the global map. If it's not present, it either doesn't exist
    /// on disk or was corrupted and couldn't be loaded on startup. The returned timeline
    /// is always valid, i.e. loaded in memory and not cancelled.
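    ///
    /// Illustrative error handling (sketch only; the request-handling comments are
    /// placeholders):
    ///
    /// ```ignore
    /// match GlobalTimelines::get(ttid) {
    ///     Ok(tli) => { /* serve the request using tli */ }
    ///     Err(TimelineError::NotFound(_)) => { /* respond with "not found" */ }
    ///     Err(e) => { /* cancelled or otherwise unusable */ }
    /// }
    /// ```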
    pub fn get(ttid: TenantTimelineId) -> Result<Arc<Timeline>, TimelineError> {
        let res = TIMELINES_STATE.lock().unwrap().get(&ttid);

        match res {
            Ok(tli) => {
                if tli.is_cancelled() {
                    return Err(TimelineError::Cancelled(ttid));
                }
                Ok(tli)
            }
            _ => res,
        }
    }

    /// Returns all timelines. This is used for background timeline processes.
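    ///
    /// Sketch of a typical background loop over the active (non-cancelled)
    /// timelines; the loop body is a placeholder:
    ///
    /// ```ignore
    /// for tli in GlobalTimelines::get_all() {
    ///     // do periodic work for this timeline
    /// }
    /// ```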
    pub fn get_all() -> Vec<Arc<Timeline>> {
        let global_lock = TIMELINES_STATE.lock().unwrap();
        global_lock
            .timelines
            .values()
            .filter(|t| !t.is_cancelled())
            .cloned()
            .collect()
    }

    /// Returns all timelines belonging to a given tenant. Used for deleting all timelines
    /// of a tenant, which is why it may also return cancelled timelines, so that deleting
    /// them can be retried.
    fn get_all_for_tenant(tenant_id: TenantId) -> Vec<Arc<Timeline>> {
        let global_lock = TIMELINES_STATE.lock().unwrap();
        global_lock
            .timelines
            .values()
            .filter(|t| t.ttid.tenant_id == tenant_id)
            .cloned()
            .collect()
    }

    /// Cancels the timeline, then deletes the corresponding data directory.
    /// If only_local is set, WAL segments in remote storage are not removed.
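    ///
    /// Illustrative call that also removes WAL from remote storage (sketch):
    ///
    /// ```ignore
    /// let res = GlobalTimelines::delete(&ttid, /* only_local */ false).await?;
    /// if res.dir_existed {
    ///     info!("removed timeline directory for {}", ttid);
    /// }
    /// ```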
    pub async fn delete(
        ttid: &TenantTimelineId,
        only_local: bool,
    ) -> Result<TimelineDeleteForceResult> {
        let tli_res = TIMELINES_STATE.lock().unwrap().get(ttid);
        match tli_res {
            Ok(timeline) => {
                let was_active = timeline.broker_active.load(Ordering::Relaxed);

                // Take a lock and finish the deletion holding this mutex.
                let mut shared_state = timeline.write_shared_state().await;

                info!("deleting timeline {}, only_local={}", ttid, only_local);
                let dir_existed = timeline.delete(&mut shared_state, only_local).await?;

                // Remove timeline from the map.
                // FIXME: re-enable it once we fix the issue with recreation of deleted timelines
                // https://github.com/neondatabase/neon/issues/3146
                // TIMELINES_STATE.lock().unwrap().timelines.remove(ttid);

                Ok(TimelineDeleteForceResult {
                    dir_existed,
                    was_active, // TODO: we probably should remove this field
                })
            }
            Err(_) => {
                // Timeline is not in memory, but it may still exist on disk in a broken state.
                let dir_path = get_timeline_dir(TIMELINES_STATE.lock().unwrap().get_conf(), ttid);
                let dir_existed = delete_dir(dir_path)?;

                Ok(TimelineDeleteForceResult {
                    dir_existed,
                    was_active: false,
                })
            }
        }
    }

    /// Deactivates and deletes all timelines for the tenant. Returns a map of all timelines
    /// the tenant had, with `was_active == true` if the timeline was active. There may be a
    /// race if new timelines are created simultaneously; in that case the function returns
    /// an error and the caller should retry tenant deletion later.
    ///
    /// If only_local is set, WAL segments in remote storage are not removed.
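    ///
    /// Illustrative call (sketch):
    ///
    /// ```ignore
    /// let deleted = GlobalTimelines::delete_force_all_for_tenant(&tenant_id, true).await?;
    /// info!("deleted {} timelines of tenant {}", deleted.len(), tenant_id);
    /// ```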
    pub async fn delete_force_all_for_tenant(
        tenant_id: &TenantId,
        only_local: bool,
    ) -> Result<HashMap<TenantTimelineId, TimelineDeleteForceResult>> {
        info!("deleting all timelines for tenant {}", tenant_id);
        let to_delete = Self::get_all_for_tenant(*tenant_id);

        let mut err = None;

        let mut deleted = HashMap::new();
        for tli in &to_delete {
            match Self::delete(&tli.ttid, only_local).await {
                Ok(result) => {
                    deleted.insert(tli.ttid, result);
                }
                Err(e) => {
                    error!("failed to delete timeline {}: {}", tli.ttid, e);
                    // Save the error to return later.
                    err = Some(e);
                }
            }
        }

        // If there was an error, return it.
        if let Some(e) = err {
            return Err(e);
        }

        // There may be broken timelines on disk, so delete the whole tenant dir as well.
        // Note that new timelines could have been created concurrently while we were
        // deleting, so the directory may not be empty. In that case those timelines will
        // be in a bad state and timeline background jobs can panic.
        delete_dir(get_tenant_dir(
            TIMELINES_STATE.lock().unwrap().get_conf(),
            tenant_id,
        ))?;

        // FIXME: we temporarily disabled removing timelines from the map, see `delete_force`
        // let tlis_after_delete = Self::get_all_for_tenant(*tenant_id);
        // if !tlis_after_delete.is_empty() {
        //     // Some timelines were created while we were deleting them, returning error
        //     // to the caller, so it can retry later.
        //     bail!(
        //         "failed to delete all timelines for tenant {}: some timelines were created while we were deleting them",
        //         tenant_id
        //     );
        // }

        Ok(deleted)
    }
}
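
/// Result of a single forced timeline deletion. With the derived `Serialize`
/// implementation this is rendered (e.g. by serde_json) roughly as
/// `{"dir_existed":true,"was_active":false}`.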
#[derive(Clone, Copy, Serialize)]
pub struct TimelineDeleteForceResult {
    pub dir_existed: bool,
    pub was_active: bool,
}

/// Deletes the directory and its contents. Returns false if the directory does not exist.
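///
/// Deletion is effectively idempotent: a second call on the same path returns
/// `Ok(false)` because the directory no longer exists (illustrative sketch,
/// assuming `path` points at an existing directory):
///
/// ```ignore
/// assert!(delete_dir(path.clone())?);  // first call removes the directory
/// assert!(!delete_dir(path)?);         // second call: already gone
/// ```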
fn delete_dir(path: Utf8PathBuf) -> Result<bool> {
    match std::fs::remove_dir_all(path) {
        Ok(_) => Ok(true),
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(false),
        Err(e) => Err(e.into()),
    }
}
        

Generated by: LCOV version 2.1-beta