//! Periodically collect consumption metrics for all active tenants
//! and push them to an HTTP endpoint.
use crate::context::{DownloadBehavior, RequestContext};
use crate::task_mgr::{self, TaskKind, BACKGROUND_RUNTIME};
use crate::tenant::size::CalculateSyntheticSizeError;
use crate::tenant::tasks::BackgroundLoopKind;
use crate::tenant::{mgr::TenantManager, LogicalSizeCalculationCause, Tenant};
use camino::Utf8PathBuf;
use consumption_metrics::EventType;
use pageserver_api::models::TenantState;
use remote_storage::{GenericRemoteStorage, RemoteStorageConfig};
use reqwest::Url;
use std::collections::HashMap;
use std::sync::Arc;
use std::time::{Duration, SystemTime};
use tokio::time::Instant;
use tokio_util::sync::CancellationToken;
use tracing::*;
use utils::id::NodeId;

mod metrics;
use crate::consumption_metrics::metrics::MetricsKey;
mod disk_cache;
mod upload;

const DEFAULT_HTTP_REPORTING_TIMEOUT: Duration = Duration::from_secs(60);

/// Basically a key-value pair, usually carried in a `Vec` except for [`Cache`].
///
/// This is the internal counterpart of `consumption_metrics::Event`, which is the externally
/// communicated form. The difference is the missing idempotency key, which exists only for
/// the duration of upload attempts.
type RawMetric = (MetricsKey, (EventType, u64));

/// Caches the [`RawMetric`]s
///
/// During startup, the last values sent are restored into this map and used when calculating
/// new ones. After a successful upload, the values that were sent replace the cached ones.
/// The cache used to drive deduplication as well, but that is no longer needed.
type Cache = HashMap<MetricsKey, (EventType, u64)>;
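
// How the two shapes relate (a minimal sketch; `key`, `event_type` and the value below are
// hypothetical placeholders, not constructors from this module):
//
//     let collected: Vec<RawMetric> = vec![(key, (event_type, 42))];
//     // after a successful upload, the same pairs are folded into the cache:
//     let cache: Cache = collected.into_iter().collect();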

/// Main task that drives metrics collection
#[allow(clippy::too_many_arguments)]
pub async fn collect_metrics(
    tenant_manager: Arc<TenantManager>,
    metric_collection_endpoint: &Url,
    metric_collection_bucket: &Option<RemoteStorageConfig>,
    metric_collection_interval: Duration,
    _cached_metric_collection_interval: Duration,
    synthetic_size_calculation_interval: Duration,
    node_id: NodeId,
    local_disk_storage: Utf8PathBuf,
    cancel: CancellationToken,
    ctx: RequestContext,
) -> anyhow::Result<()> {
    if _cached_metric_collection_interval != Duration::ZERO {
        tracing::warn!(
            "cached_metric_collection_interval is no longer used, please set it to zero."
        )
    }

    // spin up a background worker that calculates tenant sizes
    let worker_ctx =
        ctx.detached_child(TaskKind::CalculateSyntheticSize, DownloadBehavior::Download);
    task_mgr::spawn(
        BACKGROUND_RUNTIME.handle(),
        TaskKind::CalculateSyntheticSize,
        None,
        None,
        "synthetic size calculation",
        false,
        {
            let tenant_manager = tenant_manager.clone();
            async move {
                calculate_synthetic_size_worker(
                    tenant_manager,
                    synthetic_size_calculation_interval,
                    &cancel,
                    &worker_ctx,
                )
                .instrument(info_span!("synthetic_size_worker"))
                .await?;
                Ok(())
            }
        },
    );

    let path: Arc<Utf8PathBuf> = Arc::new(local_disk_storage);

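    // the `cancel` parameter was moved into the synthetic size worker above, so the
    // collection loop below listens on the task manager's shutdown token instead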
    let cancel = task_mgr::shutdown_token();

    let restore_and_reschedule = restore_and_reschedule(&path, metric_collection_interval);

    let mut cached_metrics = tokio::select! {
        _ = cancel.cancelled() => return Ok(()),
        ret = restore_and_reschedule => ret,
    };

    // define client here to reuse it for all requests
    let client = reqwest::ClientBuilder::new()
        .timeout(DEFAULT_HTTP_REPORTING_TIMEOUT)
        .build()
        .expect("Failed to create http client with timeout");

    let bucket_client = if let Some(bucket_config) = metric_collection_bucket {
        match GenericRemoteStorage::from_config(bucket_config) {
            Ok(client) => Some(client),
            Err(e) => {
                // Non-fatal error: if we were given an invalid config, we will proceed
                // with sending metrics over the network, but not to S3.
                tracing::warn!("Invalid configuration for metric_collection_bucket: {e}");
                None
            }
        }
    } else {
        None
    };

    let node_id = node_id.to_string();

    loop {
        let started_at = Instant::now();

        // these are point-in-time values, each collected with its own "now"
        let metrics = metrics::collect_all_metrics(&tenant_manager, &cached_metrics, &ctx).await;

        let metrics = Arc::new(metrics);

        // Why not race against cancellation here? Because this is one of the last tasks to
        // stop, and if we have already made it this far, it is better to try to flush the
        // new values.

        let flush = async {
            match disk_cache::flush_metrics_to_disk(&metrics, &path).await {
                Ok(()) => {
                    tracing::debug!("flushed metrics to disk");
                }
                Err(e) => {
                    // the idea is that if someone has created a directory at our path, they
                    // might notice it in the logs before shutdown and remove it
                    tracing::error!("failed to persist metrics to {path:?}: {e:#}");
                }
            }

            if let Some(bucket_client) = &bucket_client {
                let res =
                    upload::upload_metrics_bucket(bucket_client, &cancel, &node_id, &metrics).await;
                if let Err(e) = res {
                    tracing::error!("failed to upload to S3: {e:#}");
                }
            }
        };

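        // on success, `upload_metrics_http` also folds the freshly sent values into
        // `cached_metrics`, so the next iteration computes increments against what was
        // actually delivered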
        let upload = async {
            let res = upload::upload_metrics_http(
                &client,
                metric_collection_endpoint,
                &cancel,
                &node_id,
                &metrics,
                &mut cached_metrics,
            )
            .await;
            if let Err(e) = res {
                // serialization error which should never happen
                tracing::error!("failed to upload via HTTP due to {e:#}");
            }
        };

        // let these run concurrently
        let (_, _) = tokio::join!(flush, upload);

        crate::tenant::tasks::warn_when_period_overrun(
            started_at.elapsed(),
            metric_collection_interval,
            BackgroundLoopKind::ConsumptionMetricsCollectMetrics,
        );

        let res = tokio::time::timeout_at(
            started_at + metric_collection_interval,
            task_mgr::shutdown_token().cancelled(),
        )
        .await;
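        // `Ok` means the shutdown token fired before the deadline; `Err(Elapsed)` means a
        // full collection interval has passed and another iteration should run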
        if res.is_ok() {
            return Ok(());
        }
    }
}
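
// A minimal standalone sketch (hypothetical helper, not part of this module) of the
// "wait out the rest of the period, or leave on shutdown" idiom used by both loops in
// this file:
//
//     async fn wait_or_shutdown(
//         started_at: tokio::time::Instant,
//         period: std::time::Duration,
//         cancel: &tokio_util::sync::CancellationToken,
//     ) -> bool {
//         // `Ok` means `cancelled()` won the race: shutdown was requested.
//         // `Err(Elapsed)` means the full period passed: run another iteration.
//         tokio::time::timeout_at(started_at + period, cancel.cancelled())
//             .await
//             .is_ok()
//     }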

/// Called on the first iteration in an attempt to rejoin the metric uploading schedule
/// from the previous pageserver session. The pageserver is supposed to upload at regular
/// intervals regardless of restarts.
///
/// Cancellation safe.
async fn restore_and_reschedule(
    path: &Arc<Utf8PathBuf>,
    metric_collection_interval: Duration,
) -> Cache {
    let (cached, earlier_metric_at) = match disk_cache::read_metrics_from_disk(path.clone()).await {
        Ok(found_some) => {
            // no `min` is needed here because the metrics are written sequentially in
            // collect_all_metrics, so the first one is the oldest
            let earlier_metric_at = found_some
                .iter()
                .map(|(_, (et, _))| et.recorded_at())
                .copied()
                .next();

            let cached = found_some.into_iter().collect::<Cache>();

            (cached, earlier_metric_at)
        }
        Err(e) => {
            use std::io::{Error, ErrorKind};

            let root = e.root_cause();
            let maybe_ioerr = root.downcast_ref::<Error>();
            let is_not_found = maybe_ioerr.is_some_and(|e| e.kind() == ErrorKind::NotFound);

            if !is_not_found {
                tracing::info!("failed to read any previous metrics from {path:?}: {e:#}");
            }

            (HashMap::new(), None)
        }
    };

    if let Some(earlier_metric_at) = earlier_metric_at {
        let earlier_metric_at: SystemTime = earlier_metric_at.into();

        let error = reschedule(earlier_metric_at, metric_collection_interval).await;

        if let Some(error) = error {
            if error.as_secs() >= 60 {
                tracing::info!(
                    error_ms = error.as_millis(),
                    "startup scheduling error due to restart"
                )
            }
        }
    }

    cached
}
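
// Worked example of the rescheduling arithmetic (hypothetical numbers): with a 10-minute
// interval and the oldest cached metric recorded 9.5 minutes ago, `reschedule` sleeps for
// the remaining ~30 s to rejoin the previous cadence; if the metric was recorded 12 minutes
// ago instead, it returns `Some(~2 min)` without sleeping, which `restore_and_reschedule`
// logs above because it exceeds 60 s.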

async fn reschedule(
    earlier_metric_at: SystemTime,
    metric_collection_interval: Duration,
) -> Option<Duration> {
    let now = SystemTime::now();
    match now.duration_since(earlier_metric_at) {
        Ok(from_last_send) if from_last_send < metric_collection_interval => {
            let sleep_for = metric_collection_interval - from_last_send;

            let deadline = std::time::Instant::now() + sleep_for;

            tokio::time::sleep_until(deadline.into()).await;

            let now = std::time::Instant::now();

            // executor threads might be busy, so measure how far off the wakeup actually was
            Some(if now < deadline {
                deadline - now
            } else {
                now - deadline
            })
        }
        Ok(from_last_send) => Some(from_last_send.saturating_sub(metric_collection_interval)),
        Err(_) => {
            tracing::warn!(
                ?now,
                ?earlier_metric_at,
                "oldest recorded metric is in the future; first values will come out with inconsistent timestamps"
            );
            earlier_metric_at.duration_since(now).ok()
        }
    }
}

/// Calculate synthetic size for each active tenant
async fn calculate_synthetic_size_worker(
    tenant_manager: Arc<TenantManager>,
    synthetic_size_calculation_interval: Duration,
    cancel: &CancellationToken,
    ctx: &RequestContext,
) -> anyhow::Result<()> {
    info!("starting calculate_synthetic_size_worker");
    scopeguard::defer! {
        info!("calculate_synthetic_size_worker stopped");
    };

    loop {
        let started_at = Instant::now();

        let tenants = match tenant_manager.list_tenants() {
            Ok(tenants) => tenants,
            Err(e) => {
                warn!("cannot get tenant list: {e:#}");
                continue;
            }
        };

        for (tenant_shard_id, tenant_state, _gen) in tenants {
            if tenant_state != TenantState::Active {
                continue;
            }

            if !tenant_shard_id.is_shard_zero() {
                // We only send consumption metrics from shard 0, so don't waste time calculating
                // synthetic size on other shards.
                continue;
            }

            let Ok(tenant) = tenant_manager.get_attached_tenant_shard(tenant_shard_id) else {
                continue;
            };

            if !tenant.is_active() {
                continue;
            }

            // There is never a reason to exit this worker based on the return value: we do
            // not need special shutdown handling here because no tenants are returned once
            // the pageserver is shutting down.
            calculate_and_log(&tenant, cancel, ctx).await;
        }

        crate::tenant::tasks::warn_when_period_overrun(
            started_at.elapsed(),
            synthetic_size_calculation_interval,
            BackgroundLoopKind::ConsumptionMetricsSyntheticSizeWorker,
        );

        let res = tokio::time::timeout_at(
            started_at + synthetic_size_calculation_interval,
            cancel.cancelled(),
        )
        .await;
        if res.is_ok() {
            return Ok(());
        }
    }
}

async fn calculate_and_log(tenant: &Tenant, cancel: &CancellationToken, ctx: &RequestContext) {
    const CAUSE: LogicalSizeCalculationCause =
        LogicalSizeCalculationCause::ConsumptionMetricsSyntheticSize;

    // TODO should we use concurrent_background_tasks_rate_limit() here, like the other background tasks?
    // We can put in some prioritization for consumption metrics.
    // Same for the loop that fetches computed metrics.
    // By using the same limiter, we centralize metrics collection for "start" and "finished" counters,
    // which turns out to be really handy to understand the system.
    match tenant.calculate_synthetic_size(CAUSE, cancel, ctx).await {
        Ok(_) => {}
        Err(CalculateSyntheticSizeError::Cancelled) => {}
        Err(e) => {
            let tenant_shard_id = tenant.tenant_shard_id();
            error!("failed to calculate synthetic size for tenant {tenant_shard_id}: {e:#}");
        }
    }
}
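
#[cfg(test)]
mod tests {
    use super::*;

    // A minimal sketch (hypothetical values, assuming tokio's test macro is available) of
    // the "already overdue" branch of `reschedule`: when the last metric is older than one
    // full interval, the function returns the lateness without sleeping.
    #[tokio::test]
    async fn reschedule_reports_lateness_when_already_overdue() {
        let interval = Duration::from_millis(10);
        let earlier = SystemTime::now() - Duration::from_secs(30);

        let error = reschedule(earlier, interval).await;

        // ~30 s elapsed minus the 10 ms interval; leave slack for clock reads
        assert!(error.expect("lateness should be reported") >= Duration::from_secs(29));
    }
}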