LCOV - code coverage report
Current view: top level - pageserver/src/tenant - secondary.rs (source / functions)
Test: 07bee600374ccd486c69370d0972d9035964fe68.info    Test date: 2025-02-20 13:11:02
Coverage: Lines 0.0 % (0 of 241 hit)    Functions 0.0 % (0 of 31 hit)

mod downloader;
pub mod heatmap;
mod heatmap_uploader;
mod scheduler;

use std::{sync::Arc, time::SystemTime};

use crate::{
    context::RequestContext,
    disk_usage_eviction_task::DiskUsageEvictionInfo,
    metrics::SECONDARY_HEATMAP_TOTAL_SIZE,
    task_mgr::{self, TaskKind, BACKGROUND_RUNTIME},
};

use self::{
    downloader::{downloader_task, SecondaryDetail},
    heatmap_uploader::heatmap_uploader_task,
};

use super::{
    config::{SecondaryLocationConfig, TenantConfOpt},
    mgr::TenantManager,
    span::debug_assert_current_span_has_tenant_id,
    storage_layer::LayerName,
    GetTenantError,
};

use crate::metrics::SECONDARY_RESIDENT_PHYSICAL_SIZE;
use metrics::UIntGauge;
use pageserver_api::{
    models,
    shard::{ShardIdentity, TenantShardId},
};
use remote_storage::GenericRemoteStorage;

use tokio::task::JoinHandle;
use tokio_util::sync::CancellationToken;
use tracing::instrument;
use utils::{completion::Barrier, id::TimelineId, sync::gate::Gate};

enum DownloadCommand {
    Download(TenantShardId),
}
enum UploadCommand {
    Upload(TenantShardId),
}

impl UploadCommand {
    fn get_tenant_shard_id(&self) -> &TenantShardId {
        match self {
            Self::Upload(id) => id,
        }
    }
}

impl DownloadCommand {
    fn get_tenant_shard_id(&self) -> &TenantShardId {
        match self {
            Self::Download(id) => id,
        }
    }
}

struct CommandRequest<T> {
    payload: T,
    response_tx: tokio::sync::oneshot::Sender<CommandResponse>,
}

struct CommandResponse {
    result: Result<(), SecondaryTenantError>,
}

#[derive(thiserror::Error, Debug)]
pub(crate) enum SecondaryTenantError {
    #[error("{0}")]
    GetTenant(GetTenantError),
    #[error("shutting down")]
    ShuttingDown,
}

impl From<GetTenantError> for SecondaryTenantError {
    fn from(gte: GetTenantError) -> Self {
        Self::GetTenant(gte)
    }
}
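
// Illustrative sketch (not part of the original source): how the `thiserror`
// attributes above render. `#[error("{0}")]` forwards the Display impl of the
// wrapped value, and the manual `From` impl plays the role that a `#[from]`
// attribute would otherwise fill. `SketchError` is a hypothetical stand-in.
#[cfg(test)]
mod error_display_sketch {
    #[derive(thiserror::Error, Debug)]
    enum SketchError {
        #[error("{0}")]
        Wrapped(String),
        #[error("shutting down")]
        ShuttingDown,
    }

    #[test]
    fn display_matches_error_attributes() {
        let wrapped = SketchError::Wrapped("tenant not found".to_string());
        assert_eq!(wrapped.to_string(), "tenant not found");
        assert_eq!(SketchError::ShuttingDown.to_string(), "shutting down");
    }
}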

// Whereas [`Tenant`] represents an attached tenant, this type represents the work
// we do for secondary tenant locations: where we are not serving clients or
// ingesting WAL, but we are maintaining a warm cache of layer files.
//
// This type is all about the _download_ path for secondary mode.  The upload path
// runs separately (see [`heatmap_uploader`]) while a regular attached `Tenant` exists.
//
// This structure coordinates TenantManager and SecondaryDownloader,
// so that the downloader can indicate which tenants it is currently
// operating on, and the manager can indicate when a particular
// secondary tenant should cancel any work in flight.
#[derive(Debug)]
pub(crate) struct SecondaryTenant {
    /// Carrying a tenant shard ID simplifies callers such as the downloader
    /// which need to organize many of these objects by ID.
    tenant_shard_id: TenantShardId,

    /// Cancellation token indicates to SecondaryDownloader that it should stop doing
    /// any work for this tenant at the next opportunity.
    pub(crate) cancel: CancellationToken,

    pub(crate) gate: Gate,

    // Secondary mode does not need the full shard identity or the TenantConfOpt.  However,
    // storing these enables us to report our full LocationConf, enabling convenient reconciliation
    // by the control plane (see [`Self::get_location_conf`])
    shard_identity: ShardIdentity,
    tenant_conf: std::sync::Mutex<TenantConfOpt>,

    // Internal state used by the Downloader.
    detail: std::sync::Mutex<SecondaryDetail>,

    // Public state indicating overall progress of downloads relative to the last heatmap seen
    pub(crate) progress: std::sync::Mutex<models::SecondaryProgress>,

    // Sum of layer sizes on local disk
    pub(super) resident_size_metric: UIntGauge,

    // Sum of layer sizes in the most recently downloaded heatmap
    pub(super) heatmap_total_size_metric: UIntGauge,
}
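
// Illustrative sketch (not part of the original source): a minimal version of the
// coordination pattern described above, using only `std`, `tokio` and `tokio_util`.
// The manager shares an `Arc` with a background "downloader"; the worker checks the
// cancellation token between units of work, and the manager fires it to stop any
// work in flight. Names such as `SketchTenantState` are hypothetical stand-ins for
// `SecondaryTenant` / `SecondaryDetail`.
#[cfg(test)]
mod secondary_coordination_sketch {
    use std::sync::{Arc, Mutex};

    use tokio_util::sync::CancellationToken;

    struct SketchTenantState {
        cancel: CancellationToken,
        // Stand-in for `SecondaryDetail`: state the worker updates and the manager reads.
        downloaded_layers: Mutex<Vec<String>>,
    }

    #[tokio::test]
    async fn manager_stops_worker_at_next_opportunity() {
        let state = Arc::new(SketchTenantState {
            cancel: CancellationToken::new(),
            downloaded_layers: Mutex::new(Vec::new()),
        });

        // "Downloader": one unit of work at a time, stopping as soon as it observes
        // cancellation rather than mid-operation.
        let worker_state = state.clone();
        let worker = tokio::spawn(async move {
            for i in 0.. {
                if worker_state.cancel.is_cancelled() {
                    break;
                }
                worker_state
                    .downloaded_layers
                    .lock()
                    .unwrap()
                    .push(format!("layer-{i}"));
                tokio::task::yield_now().await;
            }
        });

        // "Manager": decides this tenant should stop, then waits for the worker to exit.
        state.cancel.cancel();
        worker.await.unwrap();

        // Whatever the worker completed before stopping is still visible to the manager.
        let _layers = state.downloaded_layers.lock().unwrap().len();
    }
}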

impl SecondaryTenant {
    pub(crate) fn new(
        tenant_shard_id: TenantShardId,
        shard_identity: ShardIdentity,
        tenant_conf: TenantConfOpt,
        config: &SecondaryLocationConfig,
    ) -> Arc<Self> {
        let tenant_id = tenant_shard_id.tenant_id.to_string();
        let shard_id = format!("{}", tenant_shard_id.shard_slug());
        let resident_size_metric = SECONDARY_RESIDENT_PHYSICAL_SIZE
            .get_metric_with_label_values(&[&tenant_id, &shard_id])
            .unwrap();

        let heatmap_total_size_metric = SECONDARY_HEATMAP_TOTAL_SIZE
            .get_metric_with_label_values(&[&tenant_id, &shard_id])
            .unwrap();

        Arc::new(Self {
            tenant_shard_id,
            // todo: shall we make this a descendant of the
            // main cancellation token, or is it sufficient that
            // on shutdown we walk the tenants and fire their
            // individual cancellations? (see the cancellation sketch after this impl block)
            cancel: CancellationToken::new(),
            gate: Gate::default(),

            shard_identity,
            tenant_conf: std::sync::Mutex::new(tenant_conf),

            detail: std::sync::Mutex::new(SecondaryDetail::new(config.clone())),

            progress: std::sync::Mutex::default(),

            resident_size_metric,
            heatmap_total_size_metric,
        })
    }

    pub(crate) fn tenant_shard_id(&self) -> TenantShardId {
        self.tenant_shard_id
    }

    pub(crate) async fn shutdown(&self) {
        self.cancel.cancel();

        // Wait for any secondary downloader work to complete
        self.gate.close().await;

        self.validate_metrics();

        let tenant_id = self.tenant_shard_id.tenant_id.to_string();
        let shard_id = format!("{}", self.tenant_shard_id.shard_slug());
        let _ = SECONDARY_RESIDENT_PHYSICAL_SIZE.remove_label_values(&[&tenant_id, &shard_id]);
        let _ = SECONDARY_HEATMAP_TOTAL_SIZE.remove_label_values(&[&tenant_id, &shard_id]);
    }

    pub(crate) fn set_config(&self, config: &SecondaryLocationConfig) {
        self.detail.lock().unwrap().config = config.clone();
    }

    pub(crate) fn set_tenant_conf(&self, config: &TenantConfOpt) {
        *(self.tenant_conf.lock().unwrap()) = config.clone();
    }

    /// For API access: generate a LocationConfig equivalent to the one that would be used to
    /// create a Tenant in the same state.  Do not use this in hot paths: it's for relatively
    /// rare external API calls, like a reconciliation at startup.
    pub(crate) fn get_location_conf(&self) -> models::LocationConfig {
        let conf = self.detail.lock().unwrap().config.clone();

        let conf = models::LocationConfigSecondary { warm: conf.warm };

        let tenant_conf = self.tenant_conf.lock().unwrap().clone();
        models::LocationConfig {
            mode: models::LocationConfigMode::Secondary,
            generation: None,
            secondary_conf: Some(conf),
            shard_number: self.tenant_shard_id.shard_number.0,
            shard_count: self.tenant_shard_id.shard_count.literal(),
            shard_stripe_size: self.shard_identity.stripe_size.0,
            tenant_conf: tenant_conf.into(),
        }
    }

    pub(crate) fn get_tenant_shard_id(&self) -> &TenantShardId {
        &self.tenant_shard_id
    }

    pub(crate) fn get_layers_for_eviction(self: &Arc<Self>) -> (DiskUsageEvictionInfo, usize) {
        self.detail.lock().unwrap().get_layers_for_eviction(self)
    }

    /// Cancellation safe: if the caller's future is dropped, the eviction still goes through to completion.
    #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%timeline_id, name=%name))]
    pub(crate) async fn evict_layer(self: &Arc<Self>, timeline_id: TimelineId, name: LayerName) {
        debug_assert_current_span_has_tenant_id();

        let guard = match self.gate.enter() {
            Ok(g) => g,
            Err(_) => {
                tracing::debug!("Dropping layer evictions, secondary tenant shutting down");
                return;
            }
        };

        let now = SystemTime::now();
        tracing::info!("Evicting secondary layer");

        let this = self.clone();

        // Spawn the blocking work so it runs to completion even if this future is
        // dropped (cancellation safety); see the sketch after this impl block.
        tokio::task::spawn_blocking(move || {
            let _guard = guard;

            // Update the timeline's state.  This does not have to be synchronized with
            // the download process, because:
            // - If downloader is racing with us to remove a file (e.g. because it is
            //   removed from heatmap), then our mutual .remove() operations will both
            //   succeed.
            // - If downloader is racing with us to download the object (this would require
            //   multiple eviction iterations to race with multiple download iterations), then
            //   if we remove it from the state, the worst that happens is the downloader
            //   downloads it again before re-inserting, or we delete the file but it remains
            //   in the state map (in which case it will be downloaded if this secondary
            //   tenant transitions to attached and tries to access it)
            //
            // The important assumption here is that the secondary timeline state does not
            // have to 100% match what is on disk, because it's a best-effort warming
            // of the cache.
            let mut detail = this.detail.lock().unwrap();
            if let Some(removed) =
                detail.evict_layer(name, &timeline_id, now, &this.resident_size_metric)
            {
                // We might race with removal of the same layer during downloads, so finding the layer we
                // were trying to remove is optional.  Only issue the disk I/O to remove it if we found it.
                removed.remove_blocking();
            }
        })
        .await
        .expect("secondary eviction should not have panicked");
    }

    /// Exhaustive check that incrementally updated metrics match the actual state.
    #[cfg(feature = "testing")]
    fn validate_metrics(&self) {
        let detail = self.detail.lock().unwrap();
        let resident_size = detail.total_resident_size();

        assert_eq!(resident_size, self.resident_size_metric.get());
    }

    #[cfg(not(feature = "testing"))]
    fn validate_metrics(&self) {
        // No-op in non-testing builds
    }
}
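
// Illustrative sketches (not part of the original source) for two cancellation
// details in the impl above: the `todo` in `new` about deriving the per-tenant
// token from a process-wide one, and the `spawn_blocking` call that makes
// `evict_layer` cancellation safe. Both use only `tokio`/`tokio_util` APIs.
#[cfg(test)]
mod cancellation_sketches {
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::Arc;
    use std::time::Duration;

    use tokio_util::sync::CancellationToken;

    // Re: the todo in `new`: a child token observes the parent's cancellation, so a
    // per-tenant token created via `child_token()` would be fired automatically by a
    // process-wide shutdown, while cancelling the child alone leaves the parent intact.
    #[test]
    fn parent_cancellation_propagates_to_children() {
        let process_wide = CancellationToken::new();

        let tenant_a = process_wide.child_token();
        tenant_a.cancel();
        assert!(!process_wide.is_cancelled());

        let tenant_b = process_wide.child_token();
        process_wide.cancel();
        assert!(tenant_b.is_cancelled());
    }

    // Re: `evict_layer`: work handed to `spawn_blocking` runs to completion even if
    // the future awaiting its `JoinHandle` is dropped, which is what lets the caller
    // be cancelled while the eviction still goes through.
    #[tokio::test]
    async fn dropped_caller_does_not_cancel_blocking_work() {
        let finished = Arc::new(AtomicBool::new(false));
        let finished_clone = finished.clone();

        // Drop the JoinHandle immediately, simulating a cancelled caller.
        drop(tokio::task::spawn_blocking(move || {
            std::thread::sleep(Duration::from_millis(50));
            finished_clone.store(true, Ordering::SeqCst);
        }));

        // Give the blocking pool ample time: the work completes anyway.
        tokio::time::sleep(Duration::from_millis(500)).await;
        assert!(finished.load(Ordering::SeqCst));
    }
}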

/// The SecondaryController is a pseudo-rpc client for administrative control of secondary mode
/// downloads and heatmap uploads.  This is not a hot data path; it is used for:
/// - Live migrations, where we want to ensure a migration destination has the freshest possible
///   content before trying to cut over.
/// - Tests, where we want to immediately upload/download for a particular tenant.
///
/// In normal operations, outside of migrations, uploads & downloads are autonomous and not driven by this interface.
pub struct SecondaryController {
    upload_req_tx: tokio::sync::mpsc::Sender<CommandRequest<UploadCommand>>,
    download_req_tx: tokio::sync::mpsc::Sender<CommandRequest<DownloadCommand>>,
}

impl SecondaryController {
    async fn dispatch<T>(
        &self,
        queue: &tokio::sync::mpsc::Sender<CommandRequest<T>>,
        payload: T,
    ) -> Result<(), SecondaryTenantError> {
        let (response_tx, response_rx) = tokio::sync::oneshot::channel();

        queue
            .send(CommandRequest {
                payload,
                response_tx,
            })
            .await
            .map_err(|_| SecondaryTenantError::ShuttingDown)?;

        let response = response_rx
            .await
            .map_err(|_| SecondaryTenantError::ShuttingDown)?;

        response.result
    }

    pub(crate) async fn upload_tenant(
        &self,
        tenant_shard_id: TenantShardId,
    ) -> Result<(), SecondaryTenantError> {
        self.dispatch(&self.upload_req_tx, UploadCommand::Upload(tenant_shard_id))
            .await
    }
    pub(crate) async fn download_tenant(
        &self,
        tenant_shard_id: TenantShardId,
    ) -> Result<(), SecondaryTenantError> {
        self.dispatch(
            &self.download_req_tx,
            DownloadCommand::Download(tenant_shard_id),
        )
        .await
    }
}
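
// Illustrative sketch (not part of the original source): the request/response shape
// `dispatch` builds on, reduced to plain `tokio` channels. Each request carries a
// oneshot sender; the background task replies on it, and a closed channel on either
// side maps to a "shutting down" error, mirroring the handling above. Names such as
// `SketchRequest` are hypothetical.
#[cfg(test)]
mod command_dispatch_sketch {
    use tokio::sync::{mpsc, oneshot};

    struct SketchRequest {
        payload: String,
        response_tx: oneshot::Sender<Result<(), String>>,
    }

    async fn dispatch(
        queue: &mpsc::Sender<SketchRequest>,
        payload: String,
    ) -> Result<(), String> {
        let (response_tx, response_rx) = oneshot::channel();
        queue
            .send(SketchRequest {
                payload,
                response_tx,
            })
            .await
            .map_err(|_| "shutting down".to_string())?;
        response_rx.await.map_err(|_| "shutting down".to_string())?
    }

    #[tokio::test]
    async fn round_trip() {
        let (tx, mut rx) = mpsc::channel::<SketchRequest>(16);

        // The "uploader/downloader task": serve one command, then exit.
        let worker = tokio::spawn(async move {
            if let Some(req) = rx.recv().await {
                assert_eq!(req.payload, "upload tenant-a");
                let _ = req.response_tx.send(Ok(()));
            }
        });

        dispatch(&tx, "upload tenant-a".to_string()).await.unwrap();
        worker.await.unwrap();
    }
}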

pub struct GlobalTasks {
    cancel: CancellationToken,
    uploader: JoinHandle<()>,
    downloader: JoinHandle<()>,
}

impl GlobalTasks {
    /// Caller is responsible for requesting shutdown via the cancellation token that was
    /// passed to [`spawn_tasks`].
    ///
    /// # Panics
    ///
    /// This method panics if that token is not cancelled.
    /// This is low-risk because we call it during process shutdown, so a panic
    /// will be informative but not cause undue downtime.
    pub async fn wait(self) {
        let Self {
            cancel,
            uploader,
            downloader,
        } = self;
        assert!(
            cancel.is_cancelled(),
            "must cancel cancellation token, otherwise the tasks will not shut down"
        );

        let (uploader, downloader) = futures::future::join(uploader, downloader).await;
        uploader.expect(
            "unreachable: exit_on_panic_or_error would catch the panic and exit the process",
        );
        downloader.expect(
            "unreachable: exit_on_panic_or_error would catch the panic and exit the process",
        );
    }
}

pub fn spawn_tasks(
    tenant_manager: Arc<TenantManager>,
    remote_storage: GenericRemoteStorage,
    background_jobs_can_start: Barrier,
    cancel: CancellationToken,
) -> (SecondaryController, GlobalTasks) {
    let mgr_clone = tenant_manager.clone();
    let storage_clone = remote_storage.clone();
    let bg_jobs_clone = background_jobs_can_start.clone();

    let (download_req_tx, download_req_rx) =
        tokio::sync::mpsc::channel::<CommandRequest<DownloadCommand>>(16);
    let (upload_req_tx, upload_req_rx) =
        tokio::sync::mpsc::channel::<CommandRequest<UploadCommand>>(16);

    let cancel_clone = cancel.clone();
    let downloader = BACKGROUND_RUNTIME.spawn(task_mgr::exit_on_panic_or_error(
        "secondary tenant downloads",
        async move {
            downloader_task(
                mgr_clone,
                storage_clone,
                download_req_rx,
                bg_jobs_clone,
                cancel_clone,
                RequestContext::new(
                    TaskKind::SecondaryDownloads,
                    crate::context::DownloadBehavior::Download,
                ),
            )
            .await;
            anyhow::Ok(())
        },
    ));

    let cancel_clone = cancel.clone();
    let uploader = BACKGROUND_RUNTIME.spawn(task_mgr::exit_on_panic_or_error(
        "heatmap uploads",
        async move {
            heatmap_uploader_task(
                tenant_manager,
                remote_storage,
                upload_req_rx,
                background_jobs_can_start,
                cancel_clone,
            )
            .await;
            anyhow::Ok(())
        },
    ));

    (
        SecondaryController {
            upload_req_tx,
            download_req_tx,
        },
        GlobalTasks {
            cancel,
            uploader,
            downloader,
        },
    )
}
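
// Illustrative sketch (not part of the original source): the spawn / cancel / wait
// contract shared by `spawn_tasks` and `GlobalTasks::wait`, using plain `tokio`
// tasks. The caller owns the cancellation token, fires it at shutdown, and only
// then joins the task handles; joining without cancelling first would hang, which
// is why `wait` asserts the token is already cancelled.
#[cfg(test)]
mod spawn_and_wait_sketch {
    use futures::future::join;
    use tokio::task::JoinHandle;
    use tokio_util::sync::CancellationToken;

    fn spawn_sketch_tasks(cancel: CancellationToken) -> (JoinHandle<()>, JoinHandle<()>) {
        let c1 = cancel.clone();
        let uploader = tokio::spawn(async move { c1.cancelled().await });
        let c2 = cancel;
        let downloader = tokio::spawn(async move { c2.cancelled().await });
        (uploader, downloader)
    }

    #[tokio::test]
    async fn cancel_then_wait() {
        let cancel = CancellationToken::new();
        let (uploader, downloader) = spawn_sketch_tasks(cancel.clone());

        // Shutdown: request cancellation first, then wait for both tasks to exit.
        cancel.cancel();
        assert!(cancel.is_cancelled());
        let (u, d) = join(uploader, downloader).await;
        u.expect("uploader task panicked");
        d.expect("downloader task panicked");
    }
}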
        

Generated by: LCOV version 2.1-beta