// pageserver/src/consumption_metrics.rs

//! Periodically collect consumption metrics for all active tenants
//! and push them to an HTTP endpoint.
use crate::config::PageServerConf;
use crate::consumption_metrics::metrics::MetricsKey;
use crate::consumption_metrics::upload::KeyGen as _;
use crate::context::{DownloadBehavior, RequestContext};
use crate::task_mgr::{self, TaskKind, BACKGROUND_RUNTIME};
use crate::tenant::size::CalculateSyntheticSizeError;
use crate::tenant::tasks::BackgroundLoopKind;
use crate::tenant::{mgr::TenantManager, LogicalSizeCalculationCause, Tenant};
use camino::Utf8PathBuf;
use consumption_metrics::EventType;
use itertools::Itertools as _;
use pageserver_api::models::TenantState;
use remote_storage::{GenericRemoteStorage, RemoteStorageConfig};
use reqwest::Url;
use std::collections::HashMap;
use std::sync::Arc;
use std::time::{Duration, SystemTime};
use tokio::time::Instant;
use tokio_util::sync::CancellationToken;
use tracing::*;
use utils::id::NodeId;

mod disk_cache;
mod metrics;
mod upload;

const DEFAULT_HTTP_REPORTING_TIMEOUT: Duration = Duration::from_secs(60);

/// A key-value pair, usually carried in a `Vec` except for [`Cache`].
///
/// This is the internal counterpart of `consumption_metrics::Event`, which is the
/// externally communicated form. The difference is the missing idempotency key,
/// which lives only for the duration of upload attempts.
type RawMetric = (MetricsKey, (EventType, u64));
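
// Shape sketch (illustrative only; assumes the `consumption_metrics` crate's
// `EventType::Absolute { time }` variant, and `key` stands for some hypothetical
// `MetricsKey`):
//
//     let raw: RawMetric = (key, (EventType::Absolute { time }, 42));
//
// The idempotency key is attached only later, when such a pair is turned into an
// `Event` for an upload attempt.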

/// Caches the [`RawMetric`]s.
///
/// During startup, the last sent values are restored into this cache and used when
/// calculating new ones. After a successful upload, the cache is updated with the
/// values just sent. This used to be used for deduplication, but that is no longer
/// needed.
type Cache = HashMap<MetricsKey, (EventType, u64)>;
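
// A condensed sketch of the cache round-trip described above, as it happens in
// `collect_metrics` below (not a separate API):
//
//     let mut cached_metrics = restore_and_reschedule(&path, metric_collection_interval).await;
//     loop {
//         // consults the cache when deriving new values ...
//         let metrics = metrics::collect_all_metrics(&tenant_manager, &cached_metrics, &ctx).await;
//         // ... and a successful upload writes the sent values back into it
//         upload::upload_metrics_http(.., &metrics, &mut cached_metrics, ..).await;
//     }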

pub async fn run(
    conf: &'static PageServerConf,
    tenant_manager: Arc<TenantManager>,
    cancel: CancellationToken,
) {
    let Some(metric_collection_endpoint) = conf.metric_collection_endpoint.as_ref() else {
        return;
    };

    let local_disk_storage = conf.workdir.join("last_consumption_metrics.json");

    let metrics_ctx = RequestContext::todo_child(
        TaskKind::MetricsCollection,
        // This task itself shouldn't download anything.
        // The actual size calculation does need downloads, and
        // creates a child context with the right DownloadBehavior.
        DownloadBehavior::Error,
    );
    let collect_metrics = BACKGROUND_RUNTIME.spawn(task_mgr::exit_on_panic_or_error(
        "consumption metrics collection",
        collect_metrics(
            tenant_manager.clone(),
            metric_collection_endpoint,
            &conf.metric_collection_bucket,
            conf.metric_collection_interval,
            conf.id,
            local_disk_storage,
            cancel.clone(),
            metrics_ctx,
        )
        .instrument(info_span!("metrics_collection")),
    ));

    let worker_ctx =
        RequestContext::todo_child(TaskKind::CalculateSyntheticSize, DownloadBehavior::Download);
    let synthetic_size_worker = BACKGROUND_RUNTIME.spawn(task_mgr::exit_on_panic_or_error(
        "synthetic size calculation",
        calculate_synthetic_size_worker(
            tenant_manager.clone(),
            conf.synthetic_size_calculation_interval,
            cancel.clone(),
            worker_ctx,
        )
        .instrument(info_span!("synthetic_size_worker")),
    ));

    let (collect_metrics, synthetic_size_worker) =
        futures::future::join(collect_metrics, synthetic_size_worker).await;
    collect_metrics
        .expect("unreachable: exit_on_panic_or_error would catch the panic and exit the process");
    synthetic_size_worker
        .expect("unreachable: exit_on_panic_or_error would catch the panic and exit the process");
}

/// Main task that drives metrics collection
#[allow(clippy::too_many_arguments)]
async fn collect_metrics(
    tenant_manager: Arc<TenantManager>,
    metric_collection_endpoint: &Url,
    metric_collection_bucket: &Option<RemoteStorageConfig>,
    metric_collection_interval: Duration,
    node_id: NodeId,
    local_disk_storage: Utf8PathBuf,
    cancel: CancellationToken,
    ctx: RequestContext,
) -> anyhow::Result<()> {
    let path: Arc<Utf8PathBuf> = Arc::new(local_disk_storage);

    let restore_and_reschedule = restore_and_reschedule(&path, metric_collection_interval);

    let mut cached_metrics = tokio::select! {
        _ = cancel.cancelled() => return Ok(()),
        ret = restore_and_reschedule => ret,
    };

    // define the client here to reuse it for all requests
    let client = reqwest::ClientBuilder::new()
        .timeout(DEFAULT_HTTP_REPORTING_TIMEOUT)
        .build()
        .expect("Failed to create http client with timeout");

    let bucket_client = if let Some(bucket_config) = metric_collection_bucket {
        match GenericRemoteStorage::from_config(bucket_config).await {
            Ok(client) => Some(client),
            Err(e) => {
                // Non-fatal error: if we were given an invalid config, we will proceed
                // with sending metrics over the network, but not to S3.
                tracing::warn!("Invalid configuration for metric_collection_bucket: {e}");
                None
            }
        }
    } else {
        None
    };

    let node_id = node_id.to_string();

    loop {
        let started_at = Instant::now();

        // these are point-in-time samples; "now" differs between iterations
        let metrics = metrics::collect_all_metrics(&tenant_manager, &cached_metrics, &ctx).await;

        // Pre-generate event idempotency keys, to reuse them across the bucket
        // and HTTP sinks so that both report identical events.
        let idempotency_keys = std::iter::repeat_with(|| node_id.as_str().generate())
            .take(metrics.len())
            .collect_vec();

        let metrics = Arc::new(metrics);

        // Why not race cancellation here? Because we are one of the last tasks to shut
        // down, and if we have already come this far, it is better to try to flush the
        // new values.

        let flush = async {
            match disk_cache::flush_metrics_to_disk(&metrics, &path).await {
                Ok(()) => {
                    tracing::debug!("flushed metrics to disk");
                }
                Err(e) => {
                    // The idea is that if someone creates a directory at our path, they
                    // might notice it in the logs before shutdown and remove it.
                    tracing::error!("failed to persist metrics to {path:?}: {e:#}");
                }
            }

            if let Some(bucket_client) = &bucket_client {
                let res = upload::upload_metrics_bucket(
                    bucket_client,
                    &cancel,
                    &node_id,
                    &metrics,
                    &idempotency_keys,
                )
                .await;
                if let Err(e) = res {
                    tracing::error!("failed to upload to remote storage: {e:#}");
                }
            }
        };

        let upload = async {
            let res = upload::upload_metrics_http(
                &client,
                metric_collection_endpoint,
                &cancel,
                &metrics,
                &mut cached_metrics,
                &idempotency_keys,
            )
            .await;
            if let Err(e) = res {
                // a serialization error, which should never happen
                tracing::error!("failed to upload via HTTP due to {e:#}");
            }
        };

        // let these run concurrently
        let (_, _) = tokio::join!(flush, upload);

        crate::tenant::tasks::warn_when_period_overrun(
            started_at.elapsed(),
            metric_collection_interval,
            BackgroundLoopKind::ConsumptionMetricsCollectMetrics,
        );

        let res =
            tokio::time::timeout_at(started_at + metric_collection_interval, cancel.cancelled())
                .await;
        if res.is_ok() {
            return Ok(());
        }
    }
}
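
// Note: the `timeout_at` call above doubles as the loop's sleep and its shutdown
// check. It resolves to `Ok(())` only if `cancel` fires before the next deadline,
// while `Err(Elapsed)` means the interval passed and another iteration is due.
// The same pattern in isolation (an illustrative helper, not part of this module):
//
//     async fn sleep_or_shutdown(deadline: Instant, cancel: &CancellationToken) -> bool {
//         // true means "shut down now", false means "run the next iteration"
//         tokio::time::timeout_at(deadline, cancel.cancelled()).await.is_ok()
//     }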

/// Called on the first iteration in an attempt to join the metric uploading schedule
/// from the previous pageserver session. The pageserver is supposed to upload at
/// intervals regardless of restarts.
///
/// Cancellation safe.
async fn restore_and_reschedule(
    path: &Arc<Utf8PathBuf>,
    metric_collection_interval: Duration,
) -> Cache {
    let (cached, earlier_metric_at) = match disk_cache::read_metrics_from_disk(path.clone()).await {
        Ok(found_some) => {
            // no `min` is needed here because we write these sequentially in
            // collect_all_metrics
            let earlier_metric_at = found_some
                .iter()
                .map(|(_, (et, _))| et.recorded_at())
                .copied()
                .next();

            let cached = found_some.into_iter().collect::<Cache>();

            (cached, earlier_metric_at)
        }
        Err(e) => {
            use std::io::{Error, ErrorKind};

            let root = e.root_cause();
            let maybe_ioerr = root.downcast_ref::<Error>();
            let is_not_found = maybe_ioerr.is_some_and(|e| e.kind() == ErrorKind::NotFound);

            if !is_not_found {
                tracing::info!("failed to read any previous metrics from {path:?}: {e:#}");
            }

            (HashMap::new(), None)
        }
    };

    if let Some(earlier_metric_at) = earlier_metric_at {
        let earlier_metric_at: SystemTime = earlier_metric_at.into();

        let error = reschedule(earlier_metric_at, metric_collection_interval).await;

        if let Some(error) = error {
            if error.as_secs() >= 60 {
                tracing::info!(
                    error_ms = error.as_millis(),
                    "startup scheduling error due to restart"
                )
            }
        }
    }

    cached
}

async fn reschedule(
    earlier_metric_at: SystemTime,
    metric_collection_interval: Duration,
) -> Option<Duration> {
    let now = SystemTime::now();
    match now.duration_since(earlier_metric_at) {
        Ok(from_last_send) if from_last_send < metric_collection_interval => {
            let sleep_for = metric_collection_interval - from_last_send;

            let deadline = std::time::Instant::now() + sleep_for;

            tokio::time::sleep_until(deadline.into()).await;

            let now = std::time::Instant::now();

            // executor threads might be busy, so take an extra measurement of the
            // actual wakeup error
            Some(if now < deadline {
                deadline - now
            } else {
                now - deadline
            })
        }
        Ok(from_last_send) => Some(from_last_send.saturating_sub(metric_collection_interval)),
        Err(_) => {
            tracing::warn!(
                ?now,
                ?earlier_metric_at,
                "oldest recorded metric is in the future; first values will come out with inconsistent timestamps"
            );
            earlier_metric_at.duration_since(now).ok()
        }
    }
}
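
// Worked example of the arithmetic above: with a 10 minute interval and a metric
// recorded 7 minutes ago, `reschedule` sleeps roughly 3 minutes and then returns
// the (normally tiny) wakeup error; with a metric recorded 25 minutes ago it
// returns Some(15 minutes) immediately, i.e. how far behind schedule we already are.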

/// Calculate the synthetic size for each active tenant
async fn calculate_synthetic_size_worker(
    tenant_manager: Arc<TenantManager>,
    synthetic_size_calculation_interval: Duration,
    cancel: CancellationToken,
    ctx: RequestContext,
) -> anyhow::Result<()> {
    info!("starting calculate_synthetic_size_worker");
    scopeguard::defer! {
        info!("calculate_synthetic_size_worker stopped");
    };

    loop {
        let started_at = Instant::now();

        let tenants = match tenant_manager.list_tenants() {
            Ok(tenants) => tenants,
            Err(e) => {
                warn!("cannot get tenant list: {e:#}");
                continue;
            }
        };

        for (tenant_shard_id, tenant_state, _gen) in tenants {
            if tenant_state != TenantState::Active {
                continue;
            }

            if !tenant_shard_id.is_shard_zero() {
                // We only send consumption metrics from shard 0, so don't waste time calculating
                // synthetic size on other shards.
                continue;
            }

            let Ok(tenant) = tenant_manager.get_attached_tenant_shard(tenant_shard_id) else {
                continue;
            };

            if !tenant.is_active() {
                continue;
            }

            // There is never any reason to exit calculate_synthetic_size_worker based on
            // the return value here -- shutdown needs no special handling, because no
            // tenants are found once the pageserver is shutting down.
            calculate_and_log(&tenant, &cancel, &ctx).await;
        }

        crate::tenant::tasks::warn_when_period_overrun(
            started_at.elapsed(),
            synthetic_size_calculation_interval,
            BackgroundLoopKind::ConsumptionMetricsSyntheticSizeWorker,
        );

        let res = tokio::time::timeout_at(
            started_at + synthetic_size_calculation_interval,
            cancel.cancelled(),
        )
        .await;
        if res.is_ok() {
            return Ok(());
        }
    }
}

async fn calculate_and_log(tenant: &Tenant, cancel: &CancellationToken, ctx: &RequestContext) {
    const CAUSE: LogicalSizeCalculationCause =
        LogicalSizeCalculationCause::ConsumptionMetricsSyntheticSize;

    // TODO should we use concurrent_background_tasks_rate_limit() here, like the other background tasks?
    // We can put in some prioritization for consumption metrics.
    // Same for the loop that fetches computed metrics.
    // By using the same limiter, we centralize metrics collection for "start" and "finished" counters,
    // which turns out to be really handy for understanding the system.
    match tenant.calculate_synthetic_size(CAUSE, cancel, ctx).await {
        Ok(_) => {}
        Err(CalculateSyntheticSizeError::Cancelled) => {}
        Err(e) => {
            let tenant_shard_id = tenant.tenant_shard_id();
            error!("failed to calculate synthetic size for tenant {tenant_shard_id}: {e:#}");
        }
    }
}
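
// A minimal illustrative test of the `reschedule` contract, assuming the crate's
// usual tokio test setup (`#[tokio::test]`); it exercises only the no-sleep branch,
// where the last stored metric is already older than one full interval.
#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn reschedule_reports_overshoot_for_stale_metrics() {
        let interval = Duration::from_secs(600);
        // pretend the last metric was recorded 25 minutes ago, i.e. 2.5 intervals
        let earlier = SystemTime::now() - interval * 5 / 2;

        let error = reschedule(earlier, interval)
            .await
            .expect("an overshoot should be reported");

        // roughly 1.5 intervals have elapsed beyond the scheduled upload time
        assert!(error >= interval * 3 / 2 - Duration::from_secs(1));
        assert!(error < interval * 2);
    }
}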