Line data Source code
1 : use std::cell::Cell;
2 : use std::collections::HashMap;
3 : use std::num::NonZeroUsize;
4 : use std::os::fd::RawFd;
5 : use std::sync::atomic::AtomicU64;
6 : use std::sync::{Arc, Mutex};
7 : use std::time::{Duration, Instant};
8 :
9 : use enum_map::{Enum as _, EnumMap};
10 : use futures::Future;
11 : use metrics::{
12 : Counter, CounterVec, GaugeVec, Histogram, HistogramVec, IntCounter, IntCounterPair,
13 : IntCounterPairVec, IntCounterVec, IntGauge, IntGaugeVec, UIntGauge, UIntGaugeVec,
14 : register_counter_vec, register_gauge_vec, register_histogram, register_histogram_vec,
15 : register_int_counter, register_int_counter_pair_vec, register_int_counter_vec,
16 : register_int_gauge, register_int_gauge_vec, register_uint_gauge, register_uint_gauge_vec,
17 : };
18 : use once_cell::sync::Lazy;
19 : use pageserver_api::config::defaults::DEFAULT_MAX_GET_VECTORED_KEYS;
20 : use pageserver_api::config::{
21 : PageServicePipeliningConfig, PageServicePipeliningConfigPipelined,
22 : PageServiceProtocolPipelinedBatchingStrategy, PageServiceProtocolPipelinedExecutionStrategy,
23 : };
24 : use pageserver_api::models::InMemoryLayerInfo;
25 : use pageserver_api::shard::TenantShardId;
26 : use postgres_backend::{QueryError, is_expected_io_error};
27 : use pq_proto::framed::ConnectionError;
28 : use strum::{EnumCount, IntoEnumIterator as _, VariantNames};
29 : use strum_macros::{IntoStaticStr, VariantNames};
30 : use utils::id::TimelineId;
31 :
32 : use crate::config;
33 : use crate::config::PageServerConf;
34 : use crate::context::{PageContentKind, RequestContext};
35 : use crate::pgdatadir_mapping::DatadirModificationStats;
36 : use crate::task_mgr::TaskKind;
37 : use crate::tenant::layer_map::LayerMap;
38 : use crate::tenant::mgr::TenantSlot;
39 : use crate::tenant::storage_layer::{InMemoryLayer, PersistentLayerDesc};
40 : use crate::tenant::tasks::BackgroundLoopKind;
41 : use crate::tenant::throttle::ThrottleResult;
42 :
43 : /// Prometheus histogram buckets (in seconds) for operations in the critical
44 : /// path. In other words, operations that directly affect the latency of user
45 : /// queries.
46 : ///
47 : /// The buckets capture the majority of latencies in the microsecond and
48 : /// millisecond range, but also extend high enough to distinguish "bad" from
49 : /// "really bad".
50 : const CRITICAL_OP_BUCKETS: &[f64] = &[
51 : 0.000_001, 0.000_010, 0.000_100, // 1 us, 10 us, 100 us
52 : 0.001_000, 0.010_000, 0.100_000, // 1 ms, 10 ms, 100 ms
53 : 1.0, 10.0, 100.0, // 1 s, 10 s, 100 s
54 : ];
55 :
56 : // Metrics collected on operations on the storage repository.
57 : #[derive(Debug, VariantNames, IntoStaticStr)]
58 : #[strum(serialize_all = "kebab_case")]
59 : pub(crate) enum StorageTimeOperation {
60 : #[strum(serialize = "layer flush")]
61 : LayerFlush,
62 :
63 : #[strum(serialize = "layer flush delay")]
64 : LayerFlushDelay,
65 :
66 : #[strum(serialize = "compact")]
67 : Compact,
68 :
69 : #[strum(serialize = "create images")]
70 : CreateImages,
71 :
72 : #[strum(serialize = "logical size")]
73 : LogicalSize,
74 :
75 : #[strum(serialize = "imitate logical size")]
76 : ImitateLogicalSize,
77 :
78 : #[strum(serialize = "load layer map")]
79 : LoadLayerMap,
80 :
81 : #[strum(serialize = "gc")]
82 : Gc,
83 :
84 : #[strum(serialize = "find gc cutoffs")]
85 : FindGcCutoffs,
86 : }
87 :
88 109 : pub(crate) static STORAGE_TIME_SUM_PER_TIMELINE: Lazy<CounterVec> = Lazy::new(|| {
89 109 : register_counter_vec!(
90 : "pageserver_storage_operations_seconds_sum",
91 : "Total time spent on storage operations with operation, tenant and timeline dimensions",
92 109 : &["operation", "tenant_id", "shard_id", "timeline_id"],
93 : )
94 109 : .expect("failed to define a metric")
95 109 : });
96 :
97 109 : pub(crate) static STORAGE_TIME_COUNT_PER_TIMELINE: Lazy<IntCounterVec> = Lazy::new(|| {
98 109 : register_int_counter_vec!(
99 : "pageserver_storage_operations_seconds_count",
100 : "Count of storage operations with operation, tenant and timeline dimensions",
101 109 : &["operation", "tenant_id", "shard_id", "timeline_id"],
102 : )
103 109 : .expect("failed to define a metric")
104 109 : });
105 :
106 : /* BEGIN_HADRON */
107 109 : pub(crate) static STORAGE_ACTIVE_COUNT_PER_TIMELINE: Lazy<IntGaugeVec> = Lazy::new(|| {
108 109 : register_int_gauge_vec!(
109 : "pageserver_active_storage_operations_count",
110 : "Count of active storage operations with operation, tenant and timeline dimensions",
111 109 : &["operation", "tenant_id", "shard_id", "timeline_id"],
112 : )
113 109 : .expect("failed to define a metric")
114 109 : });
115 : /* END_HADRON */
116 :
117 : // Buckets for background operations like compaction, GC, size calculation
118 : const STORAGE_OP_BUCKETS: &[f64] = &[0.010, 0.100, 1.0, 10.0, 100.0, 1000.0];
119 :
120 109 : pub(crate) static STORAGE_TIME_GLOBAL: Lazy<HistogramVec> = Lazy::new(|| {
121 109 : register_histogram_vec!(
122 : "pageserver_storage_operations_seconds_global",
123 : "Time spent on storage operations",
124 109 : &["operation"],
125 109 : STORAGE_OP_BUCKETS.into(),
126 : )
127 109 : .expect("failed to define a metric")
128 109 : });
129 :
130 : /// Measures layers visited per read (i.e. read amplification).
131 : ///
132 : /// NB: for a batch, we count all visited layers towards each read. While the cost of layer visits
133 : /// is amortized across the batch, and some layers may not intersect with a given key, each visited
134 : /// layer contributes directly to the observed latency for every read in the batch, which is what we
135 : /// care about.
136 109 : pub(crate) static LAYERS_PER_READ: Lazy<HistogramVec> = Lazy::new(|| {
137 109 : register_histogram_vec!(
138 : "pageserver_layers_per_read",
139 : "Layers visited to serve a single read (read amplification). In a batch, all visited layers count towards every read.",
140 109 : &["tenant_id", "shard_id", "timeline_id"],
141 : // Low resolution to reduce cardinality.
142 109 : vec![4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0],
143 : )
144 109 : .expect("failed to define a metric")
145 109 : });
146 :
147 107 : pub(crate) static LAYERS_PER_READ_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
148 107 : register_histogram!(
149 : "pageserver_layers_per_read_global",
150 : "Layers visited to serve a single read (read amplification). In a batch, all visited layers count towards every read.",
151 107 : vec![1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0],
152 : )
153 107 : .expect("failed to define a metric")
154 107 : });
155 :
156 107 : pub(crate) static LAYERS_PER_READ_BATCH_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
157 107 : register_histogram!(
158 : "pageserver_layers_per_read_batch_global",
159 : "Layers visited to serve a single read batch (read amplification), regardless of number of reads.",
160 107 : vec![
161 : 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0
162 : ],
163 : )
164 107 : .expect("failed to define a metric")
165 107 : });
166 :
167 107 : pub(crate) static LAYERS_PER_READ_AMORTIZED_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
168 107 : register_histogram!(
169 : "pageserver_layers_per_read_amortized_global",
170 : "Layers visited to serve a single read (read amplification). Amortized across a batch: \
171 : all visited layers are divided by the number of reads.",
172 107 : vec![
173 : 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0
174 : ],
175 : )
176 107 : .expect("failed to define a metric")
177 107 : });
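// Worked example (semantics assumed from the help texts above): a batch of 3
// reads that visits 12 layers in total would observe
//   - LAYERS_PER_READ / LAYERS_PER_READ_GLOBAL: 12, once per read (3 times),
//   - LAYERS_PER_READ_BATCH_GLOBAL: 12, once for the whole batch,
//   - LAYERS_PER_READ_AMORTIZED_GLOBAL: 12 / 3 = 4 per read.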
178 :
179 107 : pub(crate) static DELTAS_PER_READ_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
180 : // We expect this to be low because of Postgres checkpoints. Let's see if that holds.
181 107 : register_histogram!(
182 : "pageserver_deltas_per_read_global",
183 : "Number of delta pages applied to image page per read",
184 107 : vec![0.0, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0],
185 : )
186 107 : .expect("failed to define a metric")
187 107 : });
188 :
189 0 : pub(crate) static CONCURRENT_INITDBS: Lazy<UIntGauge> = Lazy::new(|| {
190 0 : register_uint_gauge!(
191 : "pageserver_concurrent_initdb",
192 : "Number of initdb processes running"
193 : )
194 0 : .expect("failed to define a metric")
195 0 : });
196 :
197 0 : pub(crate) static INITDB_SEMAPHORE_ACQUISITION_TIME: Lazy<Histogram> = Lazy::new(|| {
198 0 : register_histogram!(
199 : "pageserver_initdb_semaphore_seconds_global",
200 : "Time spent getting a permit from the global initdb semaphore",
201 0 : STORAGE_OP_BUCKETS.into()
202 : )
203 0 : .expect("failed to define metric")
204 0 : });
205 :
206 0 : pub(crate) static INITDB_RUN_TIME: Lazy<Histogram> = Lazy::new(|| {
207 0 : register_histogram!(
208 : "pageserver_initdb_seconds_global",
209 : "Time spent performing initdb",
210 0 : STORAGE_OP_BUCKETS.into()
211 : )
212 0 : .expect("failed to define metric")
213 0 : });
214 :
215 : pub(crate) struct GetVectoredLatency {
216 : map: EnumMap<TaskKind, Option<Histogram>>,
217 : }
218 :
219 : #[allow(dead_code)]
220 : pub(crate) struct ScanLatency {
221 : map: EnumMap<TaskKind, Option<Histogram>>,
222 : }
223 :
224 : impl GetVectoredLatency {
225 : // Only these task types perform vectored gets. Filter all other tasks out to reduce total
226 : // cardinality of the metric.
227 : const TRACKED_TASK_KINDS: [TaskKind; 2] = [TaskKind::Compaction, TaskKind::PageRequestHandler];
228 :
229 10894 : pub(crate) fn for_task_kind(&self, task_kind: TaskKind) -> Option<&Histogram> {
230 10894 : self.map[task_kind].as_ref()
231 10894 : }
232 : }
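// Usage sketch (hypothetical call site, not from this file): resolve the
// per-task-kind histogram and observe elapsed seconds; untracked task kinds
// yield `None` and are skipped, which keeps metric cardinality bounded.
//
//     let started = Instant::now();
//     // ... perform the vectored get ...
//     if let Some(histogram) = GET_VECTORED_LATENCY.for_task_kind(TaskKind::Compaction) {
//         histogram.observe(started.elapsed().as_secs_f64());
//     }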
233 :
234 : impl ScanLatency {
235 : // Only these task types perform scans. Filter all other tasks out to reduce total
236 : // cardinality of the metric.
237 : const TRACKED_TASK_KINDS: [TaskKind; 1] = [TaskKind::PageRequestHandler];
238 :
239 8 : pub(crate) fn for_task_kind(&self, task_kind: TaskKind) -> Option<&Histogram> {
240 8 : self.map[task_kind].as_ref()
241 8 : }
242 : }
243 :
244 : pub(crate) struct ScanLatencyOngoingRecording<'a> {
245 : parent: &'a Histogram,
246 : start: std::time::Instant,
247 : }
248 :
249 : impl<'a> ScanLatencyOngoingRecording<'a> {
250 0 : pub(crate) fn start_recording(parent: &'a Histogram) -> ScanLatencyOngoingRecording<'a> {
251 0 : let start = Instant::now();
252 0 : ScanLatencyOngoingRecording { parent, start }
253 0 : }
254 :
255 0 : pub(crate) fn observe(self) {
256 0 : let elapsed = self.start.elapsed();
257 0 : self.parent.observe(elapsed.as_secs_f64());
258 0 : }
259 : }
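// Usage sketch (hypothetical): recording is explicit, so dropping the guard
// without calling observe() simply discards the measurement.
//
//     if let Some(histogram) = SCAN_LATENCY.for_task_kind(TaskKind::PageRequestHandler) {
//         let recording = ScanLatencyOngoingRecording::start_recording(histogram);
//         // ... perform the scan ...
//         recording.observe();
//     }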
260 :
261 105 : pub(crate) static GET_VECTORED_LATENCY: Lazy<GetVectoredLatency> = Lazy::new(|| {
262 105 : let inner = register_histogram_vec!(
263 : "pageserver_get_vectored_seconds",
264 : "Time spent in get_vectored.",
265 105 : &["task_kind"],
266 105 : CRITICAL_OP_BUCKETS.into(),
267 : )
268 105 : .expect("failed to define a metric");
269 :
270 : GetVectoredLatency {
271 3360 : map: EnumMap::from_array(std::array::from_fn(|task_kind_idx| {
272 3360 : let task_kind = TaskKind::from_usize(task_kind_idx);
273 :
274 3360 : if GetVectoredLatency::TRACKED_TASK_KINDS.contains(&task_kind) {
275 210 : let task_kind = task_kind.into();
276 210 : Some(inner.with_label_values(&[task_kind]))
277 : } else {
278 3150 : None
279 : }
280 3360 : })),
281 : }
282 105 : });
283 :
284 3 : pub(crate) static SCAN_LATENCY: Lazy<ScanLatency> = Lazy::new(|| {
285 3 : let inner = register_histogram_vec!(
286 : "pageserver_scan_seconds",
287 : "Time spent in scan.",
288 3 : &["task_kind"],
289 3 : CRITICAL_OP_BUCKETS.into(),
290 : )
291 3 : .expect("failed to define a metric");
292 :
293 : ScanLatency {
294 96 : map: EnumMap::from_array(std::array::from_fn(|task_kind_idx| {
295 96 : let task_kind = TaskKind::from_usize(task_kind_idx);
296 :
297 96 : if ScanLatency::TRACKED_TASK_KINDS.contains(&task_kind) {
298 3 : let task_kind = task_kind.into();
299 3 : Some(inner.with_label_values(&[task_kind]))
300 : } else {
301 93 : None
302 : }
303 96 : })),
304 : }
305 3 : });
306 :
307 : pub(crate) struct PageCacheMetricsForTaskKind {
308 : pub read_accesses_immutable: IntCounter,
309 : pub read_hits_immutable: IntCounter,
310 : }
311 :
312 : pub(crate) struct PageCacheMetrics {
313 : map: EnumMap<TaskKind, EnumMap<PageContentKind, PageCacheMetricsForTaskKind>>,
314 : }
315 :
316 50 : static PAGE_CACHE_READ_HITS: Lazy<IntCounterVec> = Lazy::new(|| {
317 50 : register_int_counter_vec!(
318 : "pageserver_page_cache_read_hits_total",
319 : "Number of read accesses to the page cache that hit",
320 50 : &["task_kind", "key_kind", "content_kind", "hit_kind"]
321 : )
322 50 : .expect("failed to define a metric")
323 50 : });
324 :
325 50 : static PAGE_CACHE_READ_ACCESSES: Lazy<IntCounterVec> = Lazy::new(|| {
326 50 : register_int_counter_vec!(
327 : "pageserver_page_cache_read_accesses_total",
328 : "Number of read accesses to the page cache",
329 50 : &["task_kind", "key_kind", "content_kind"]
330 : )
331 50 : .expect("failed to define a metric")
332 50 : });
333 :
334 : pub(crate) static PAGE_CACHE: Lazy<PageCacheMetrics> = Lazy::new(|| PageCacheMetrics {
335 1600 : map: EnumMap::from_array(std::array::from_fn(|task_kind| {
336 1600 : let task_kind = TaskKind::from_usize(task_kind);
337 1600 : let task_kind: &'static str = task_kind.into();
338 12800 : EnumMap::from_array(std::array::from_fn(|content_kind| {
339 12800 : let content_kind = PageContentKind::from_usize(content_kind);
340 12800 : let content_kind: &'static str = content_kind.into();
341 12800 : PageCacheMetricsForTaskKind {
342 12800 : read_accesses_immutable: {
343 12800 : PAGE_CACHE_READ_ACCESSES
344 12800 : .get_metric_with_label_values(&[task_kind, "immutable", content_kind])
345 12800 : .unwrap()
346 12800 : },
347 12800 :
348 12800 : read_hits_immutable: {
349 12800 : PAGE_CACHE_READ_HITS
350 12800 : .get_metric_with_label_values(&[task_kind, "immutable", content_kind, "-"])
351 12800 : .unwrap()
352 12800 : },
353 12800 : }
354 12800 : }))
355 1600 : })),
356 50 : });
357 :
358 : impl PageCacheMetrics {
359 591220 : pub(crate) fn for_ctx(&self, ctx: &RequestContext) -> &PageCacheMetricsForTaskKind {
360 591220 : &self.map[ctx.task_kind()][ctx.page_content_kind()]
361 591220 : }
362 : }
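// Usage sketch (hypothetical caller; `ctx` and `hit` are assumed): all label
// values are pre-resolved into the nested EnumMap at startup, so the hot path
// is two array indexes plus an atomic increment.
//
//     let metrics = PAGE_CACHE.for_ctx(ctx);
//     metrics.read_accesses_immutable.inc();
//     if hit {
//         metrics.read_hits_immutable.inc();
//     }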
363 :
364 : pub(crate) struct PageCacheSizeMetrics {
365 : pub max_bytes: UIntGauge,
366 :
367 : pub current_bytes_immutable: UIntGauge,
368 : }
369 :
370 50 : static PAGE_CACHE_SIZE_CURRENT_BYTES: Lazy<UIntGaugeVec> = Lazy::new(|| {
371 50 : register_uint_gauge_vec!(
372 : "pageserver_page_cache_size_current_bytes",
373 : "Current size of the page cache in bytes, by key kind",
374 50 : &["key_kind"]
375 : )
376 50 : .expect("failed to define a metric")
377 50 : });
378 :
379 : pub(crate) static PAGE_CACHE_SIZE: Lazy<PageCacheSizeMetrics> =
380 : Lazy::new(|| PageCacheSizeMetrics {
381 : max_bytes: {
382 50 : register_uint_gauge!(
383 : "pageserver_page_cache_size_max_bytes",
384 : "Maximum size of the page cache in bytes"
385 : )
386 50 : .expect("failed to define a metric")
387 : },
388 : current_bytes_immutable: {
389 50 : PAGE_CACHE_SIZE_CURRENT_BYTES
390 50 : .get_metric_with_label_values(&["immutable"])
391 50 : .unwrap()
392 : },
393 50 : });
394 :
395 : pub(crate) mod page_cache_eviction_metrics {
396 : use std::num::NonZeroUsize;
397 :
398 : use metrics::{IntCounter, IntCounterVec, register_int_counter_vec};
399 : use once_cell::sync::Lazy;
400 :
401 : #[derive(Clone, Copy)]
402 : pub(crate) enum Outcome {
403 : FoundSlotUnused { iters: NonZeroUsize },
404 : FoundSlotEvicted { iters: NonZeroUsize },
405 : ItersExceeded { iters: NonZeroUsize },
406 : }
407 :
408 50 : static ITERS_TOTAL_VEC: Lazy<IntCounterVec> = Lazy::new(|| {
409 50 : register_int_counter_vec!(
410 : "pageserver_page_cache_find_victim_iters_total",
411 : "Counter for the number of iterations in the find_victim loop",
412 50 : &["outcome"],
413 : )
414 50 : .expect("failed to define a metric")
415 50 : });
416 :
417 50 : static CALLS_VEC: Lazy<IntCounterVec> = Lazy::new(|| {
418 50 : register_int_counter_vec!(
419 : "pageserver_page_cache_find_victim_calls",
420 : "Incremented at the end of each find_victim() call.\
421 : Filter by outcome to get e.g., eviction rate.",
422 50 : &["outcome"]
423 : )
424 50 : .unwrap()
425 50 : });
426 :
427 17319 : pub(crate) fn observe(outcome: Outcome) {
428 : macro_rules! dry {
429 : ($label:literal, $iters:expr) => {{
430 : static LABEL: &'static str = $label;
431 : static ITERS_TOTAL: Lazy<IntCounter> =
432 59 : Lazy::new(|| ITERS_TOTAL_VEC.with_label_values(&[LABEL]));
433 : static CALLS: Lazy<IntCounter> =
434 59 : Lazy::new(|| CALLS_VEC.with_label_values(&[LABEL]));
435 : ITERS_TOTAL.inc_by(($iters.get()) as u64);
436 : CALLS.inc();
437 : }};
438 : }
439 17319 : match outcome {
440 820 : Outcome::FoundSlotUnused { iters } => dry!("found_empty", iters),
441 16499 : Outcome::FoundSlotEvicted { iters } => {
442 16499 : dry!("found_evicted", iters)
443 : }
444 0 : Outcome::ItersExceeded { iters } => {
445 0 : dry!("err_iters_exceeded", iters);
446 0 : super::page_cache_errors_inc(super::PageCacheErrorKind::EvictIterLimit);
447 0 : }
448 : }
449 17319 : }
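// Note on the `dry!` macro above: it caches one IntCounter per label value in
// function-local `static Lazy`s, so the hot find_victim() path pays the
// `with_label_values` lookup once per label rather than once per call. A
// minimal sketch of the same pattern, assuming some counter vec `MY_VEC`
// with a single label:
//
//     static OK: Lazy<IntCounter> = Lazy::new(|| MY_VEC.with_label_values(&["ok"]));
//     OK.inc();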
450 : }
451 :
452 0 : static PAGE_CACHE_ERRORS: Lazy<IntCounterVec> = Lazy::new(|| {
453 0 : register_int_counter_vec!(
454 : "page_cache_errors_total",
455 : "Number of timeouts while acquiring a pinned slot in the page cache",
456 0 : &["error_kind"]
457 : )
458 0 : .expect("failed to define a metric")
459 0 : });
460 :
461 0 : pub(crate) static FEATURE_FLAG_EVALUATION: Lazy<CounterVec> = Lazy::new(|| {
462 0 : register_counter_vec!(
463 : "pageserver_feature_flag_evaluation",
464 : "Number of times a feature flag is evaluated",
465 0 : &["flag_key", "status", "value"],
466 : )
467 0 : .unwrap()
468 0 : });
469 :
470 : #[derive(IntoStaticStr)]
471 : #[strum(serialize_all = "kebab_case")]
472 : pub(crate) enum PageCacheErrorKind {
473 : AcquirePinnedSlotTimeout,
474 : EvictIterLimit,
475 : }
476 :
477 0 : pub(crate) fn page_cache_errors_inc(error_kind: PageCacheErrorKind) {
478 0 : PAGE_CACHE_ERRORS
479 0 : .get_metric_with_label_values(&[error_kind.into()])
480 0 : .unwrap()
481 0 : .inc();
482 0 : }
483 :
484 11 : pub(crate) static WAIT_LSN_TIME: Lazy<Histogram> = Lazy::new(|| {
485 11 : register_histogram!(
486 : "pageserver_wait_lsn_seconds",
487 : "Time spent waiting for WAL to arrive. Updated on completion of the wait_lsn operation.",
488 11 : CRITICAL_OP_BUCKETS.into(),
489 : )
490 11 : .expect("failed to define a metric")
491 11 : });
492 :
493 109 : pub(crate) static WAIT_LSN_START_FINISH_COUNTERPAIR: Lazy<IntCounterPairVec> = Lazy::new(|| {
494 109 : register_int_counter_pair_vec!(
495 : "pageserver_wait_lsn_started_count",
496 : "Number of wait_lsn operations started.",
497 : "pageserver_wait_lsn_finished_count",
498 : "Number of wait_lsn operations finished.",
499 109 : &["tenant_id", "shard_id", "timeline_id"],
500 : )
501 109 : .expect("failed to define a metric")
502 109 : });
503 :
504 109 : pub(crate) static WAIT_LSN_IN_PROGRESS_MICROS: Lazy<IntCounterVec> = Lazy::new(|| {
505 109 : register_int_counter_vec!(
506 : "pageserver_wait_lsn_in_progress_micros",
507 : "Time spent waiting for WAL to arrive, by timeline_id. Updated periodically while waiting.",
508 109 : &["tenant_id", "shard_id", "timeline_id"],
509 : )
510 109 : .expect("failed to define a metric")
511 109 : });
512 :
513 109 : pub(crate) static WAIT_LSN_IN_PROGRESS_GLOBAL_MICROS: Lazy<IntCounter> = Lazy::new(|| {
514 109 : register_int_counter!(
515 : "pageserver_wait_lsn_in_progress_micros_global",
516 : "Time spent waiting for WAL to arrive, globally. Updated periodically while waiting."
517 : )
518 109 : .expect("failed to define a metric")
519 109 : });
520 :
521 3 : pub(crate) static ONDEMAND_DOWNLOAD_BYTES: Lazy<IntCounterVec> = Lazy::new(|| {
522 3 : register_int_counter_vec!(
523 : "pageserver_ondemand_download_bytes_total",
524 : "Total bytes of layers on-demand downloaded",
525 3 : &["task_kind"]
526 : )
527 3 : .expect("failed to define a metric")
528 3 : });
529 :
530 3 : pub(crate) static ONDEMAND_DOWNLOAD_COUNT: Lazy<IntCounterVec> = Lazy::new(|| {
531 3 : register_int_counter_vec!(
532 : "pageserver_ondemand_download_count",
533 : "Total count of layers on-demand downloaded",
534 3 : &["task_kind"]
535 : )
536 3 : .expect("failed to define a metric")
537 3 : });
538 :
539 : pub(crate) mod wait_ondemand_download_time {
540 : use super::*;
541 : const WAIT_ONDEMAND_DOWNLOAD_TIME_BUCKETS: &[f64] = &[
542 : 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, // 10 ms - 100ms
543 : 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, // 100ms to 1s
544 : 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, // 1s to 10s
545 : 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, // 10s to 1m
546 : ];
547 :
548 : /// The task kinds for which we want to track wait times for on-demand downloads.
549 : /// Other task kinds' wait times are accumulated in label value `unknown`.
550 : pub(crate) const WAIT_ONDEMAND_DOWNLOAD_METRIC_TASK_KINDS: [TaskKind; 2] = [
551 : TaskKind::PageRequestHandler,
552 : TaskKind::WalReceiverConnectionHandler,
553 : ];
554 :
555 0 : pub(crate) static WAIT_ONDEMAND_DOWNLOAD_TIME_GLOBAL: Lazy<Vec<Histogram>> = Lazy::new(|| {
556 0 : let histo = register_histogram_vec!(
557 : "pageserver_wait_ondemand_download_seconds_global",
558 : "Observations are individual tasks' wait times for on-demand downloads. \
559 0 : If N tasks coalesce on an on-demand download, and it takes 10s, then we observe N * 10s.",
560 0 : &["task_kind"],
561 0 : WAIT_ONDEMAND_DOWNLOAD_TIME_BUCKETS.into(),
562 : )
563 0 : .expect("failed to define a metric");
564 0 : WAIT_ONDEMAND_DOWNLOAD_METRIC_TASK_KINDS
565 0 : .iter()
566 0 : .map(|task_kind| histo.with_label_values(&[task_kind.into()]))
567 0 : .collect::<Vec<_>>()
568 0 : });
569 :
570 109 : pub(crate) static WAIT_ONDEMAND_DOWNLOAD_TIME_SUM: Lazy<CounterVec> = Lazy::new(|| {
571 109 : register_counter_vec!(
572 : // use a name that _could_ be evolved into a per-timeline histogram later
573 : "pageserver_wait_ondemand_download_seconds_sum",
574 : "Like `pageserver_wait_ondemand_download_seconds_global` but per timeline",
575 109 : &["tenant_id", "shard_id", "timeline_id", "task_kind"],
576 : )
577 109 : .unwrap()
578 109 : });
579 :
580 : pub struct WaitOndemandDownloadTimeSum {
581 : counters: [Counter; WAIT_ONDEMAND_DOWNLOAD_METRIC_TASK_KINDS.len()],
582 : }
583 :
584 : impl WaitOndemandDownloadTimeSum {
585 235 : pub(crate) fn new(tenant_id: &str, shard_id: &str, timeline_id: &str) -> Self {
586 235 : let counters = WAIT_ONDEMAND_DOWNLOAD_METRIC_TASK_KINDS
587 235 : .iter()
588 470 : .map(|task_kind| {
589 470 : WAIT_ONDEMAND_DOWNLOAD_TIME_SUM
590 470 : .get_metric_with_label_values(&[
591 470 : tenant_id,
592 470 : shard_id,
593 470 : timeline_id,
594 470 : task_kind.into(),
595 470 : ])
596 470 : .unwrap()
597 470 : })
598 235 : .collect::<Vec<_>>();
599 235 : Self {
600 235 : counters: counters.try_into().unwrap(),
601 235 : }
602 235 : }
603 12 : pub(crate) fn observe(&self, task_kind: TaskKind, duration: Duration) {
604 12 : let maybe = WAIT_ONDEMAND_DOWNLOAD_METRIC_TASK_KINDS
605 12 : .iter()
606 12 : .enumerate()
607 24 : .find(|(_, kind)| **kind == task_kind);
608 12 : let Some((idx, _)) = maybe else {
609 12 : return;
610 : };
611 0 : WAIT_ONDEMAND_DOWNLOAD_TIME_GLOBAL[idx].observe(duration.as_secs_f64());
612 0 : let counter = &self.counters[idx];
613 0 : counter.inc_by(duration.as_secs_f64());
614 12 : }
615 : }
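// Usage sketch (hypothetical wait site; a `timeline_metrics` struct holding a
// `WaitOndemandDownloadTimeSum` field is assumed): untracked task kinds return
// early in observe() above and record nothing.
//
//     let started = Instant::now();
//     // ... wait for the (possibly coalesced) on-demand download ...
//     timeline_metrics
//         .wait_ondemand_download_time
//         .observe(TaskKind::PageRequestHandler, started.elapsed());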
616 :
617 5 : pub(crate) fn shutdown_timeline(tenant_id: &str, shard_id: &str, timeline_id: &str) {
618 15 : for task_kind in WAIT_ONDEMAND_DOWNLOAD_METRIC_TASK_KINDS {
619 10 : let _ = WAIT_ONDEMAND_DOWNLOAD_TIME_SUM.remove_label_values(&[
620 10 : tenant_id,
621 10 : shard_id,
622 10 : timeline_id,
623 10 : task_kind.into(),
624 10 : ]);
625 10 : }
626 5 : }
627 :
628 0 : pub(crate) fn preinitialize_global_metrics() {
629 0 : Lazy::force(&WAIT_ONDEMAND_DOWNLOAD_TIME_GLOBAL);
630 0 : }
631 : }
632 :
633 109 : static LAST_RECORD_LSN: Lazy<IntGaugeVec> = Lazy::new(|| {
634 109 : register_int_gauge_vec!(
635 : "pageserver_last_record_lsn",
636 : "Last record LSN grouped by timeline",
637 109 : &["tenant_id", "shard_id", "timeline_id"]
638 : )
639 109 : .expect("failed to define a metric")
640 109 : });
641 :
642 109 : static DISK_CONSISTENT_LSN: Lazy<IntGaugeVec> = Lazy::new(|| {
643 109 : register_int_gauge_vec!(
644 : "pageserver_disk_consistent_lsn",
645 : "Disk consistent LSN grouped by timeline",
646 109 : &["tenant_id", "shard_id", "timeline_id"]
647 : )
648 109 : .expect("failed to define a metric")
649 109 : });
650 :
651 109 : pub(crate) static PROJECTED_REMOTE_CONSISTENT_LSN: Lazy<UIntGaugeVec> = Lazy::new(|| {
652 109 : register_uint_gauge_vec!(
653 : "pageserver_projected_remote_consistent_lsn",
654 : "Projected remote consistent LSN grouped by timeline",
655 109 : &["tenant_id", "shard_id", "timeline_id"]
656 : )
657 109 : .expect("failed to define a metric")
658 109 : });
659 :
660 109 : static PITR_HISTORY_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
661 109 : register_uint_gauge_vec!(
662 : "pageserver_pitr_history_size",
663 : "Data written since PITR cutoff on this timeline",
664 109 : &["tenant_id", "shard_id", "timeline_id"]
665 : )
666 109 : .expect("failed to define a metric")
667 109 : });
668 :
669 : #[derive(
670 : strum_macros::EnumIter,
671 : strum_macros::EnumString,
672 : strum_macros::Display,
673 : strum_macros::IntoStaticStr,
674 : )]
675 : #[strum(serialize_all = "kebab_case")]
676 : pub(crate) enum LayerKind {
677 : Delta,
678 : Image,
679 : }
680 :
681 : #[derive(
682 : strum_macros::EnumIter,
683 : strum_macros::EnumString,
684 : strum_macros::Display,
685 : strum_macros::IntoStaticStr,
686 : )]
687 : #[strum(serialize_all = "kebab_case")]
688 : pub(crate) enum LayerLevel {
689 : // We don't track the currently open ephemeral layer, since there's always exactly 1 and its
690 : // size changes. See `TIMELINE_EPHEMERAL_BYTES`.
691 : Frozen,
692 : L0,
693 : L1,
694 : }
695 :
696 107 : static TIMELINE_LAYER_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
697 107 : register_uint_gauge_vec!(
698 : "pageserver_layer_bytes",
699 : "Sum of frozen, L0, and L1 layer physical sizes in bytes (excluding the open ephemeral layer)",
700 107 : &["tenant_id", "shard_id", "timeline_id", "level", "kind"]
701 : )
702 107 : .expect("failed to define a metric")
703 107 : });
704 :
705 107 : static TIMELINE_LAYER_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
706 107 : register_uint_gauge_vec!(
707 : "pageserver_layer_count",
708 : "Number of frozen, L0, and L1 layers (excluding the open ephemeral layer)",
709 107 : &["tenant_id", "shard_id", "timeline_id", "level", "kind"]
710 : )
711 107 : .expect("failed to define a metric")
712 107 : });
713 :
714 109 : static TIMELINE_ARCHIVE_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
715 109 : register_uint_gauge_vec!(
716 : "pageserver_archive_size",
717 : "Timeline's logical size if it is considered eligible for archival (outside PITR window), else zero",
718 109 : &["tenant_id", "shard_id", "timeline_id"]
719 : )
720 109 : .expect("failed to define a metric")
721 109 : });
722 :
723 109 : static STANDBY_HORIZON: Lazy<IntGaugeVec> = Lazy::new(|| {
724 109 : register_int_gauge_vec!(
725 : "pageserver_standby_horizon",
726 : "Standby apply LSN for which GC is hold off, by timeline.",
727 109 : &["tenant_id", "shard_id", "timeline_id"]
728 : )
729 109 : .expect("failed to define a metric")
730 109 : });
731 :
732 109 : static RESIDENT_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
733 109 : register_uint_gauge_vec!(
734 : "pageserver_resident_physical_size",
735 : "The size of the layer files present in the pageserver's filesystem, for attached locations.",
736 109 : &["tenant_id", "shard_id", "timeline_id"]
737 : )
738 109 : .expect("failed to define a metric")
739 109 : });
740 :
741 109 : static VISIBLE_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
742 109 : register_uint_gauge_vec!(
743 : "pageserver_visible_physical_size",
744 : "The size of the layer files present in the pageserver's filesystem.",
745 109 : &["tenant_id", "shard_id", "timeline_id"]
746 : )
747 109 : .expect("failed to define a metric")
748 109 : });
749 :
750 107 : pub(crate) static RESIDENT_PHYSICAL_SIZE_GLOBAL: Lazy<UIntGauge> = Lazy::new(|| {
751 107 : register_uint_gauge!(
752 : "pageserver_resident_physical_size_global",
753 : "Like `pageserver_resident_physical_size`, but without tenant/timeline dimensions."
754 : )
755 107 : .expect("failed to define a metric")
756 107 : });
757 :
758 109 : static REMOTE_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
759 109 : register_uint_gauge_vec!(
760 : "pageserver_remote_physical_size",
761 : "The size of the layer files present in the remote storage that are listed in the remote index_part.json.",
762 : // Corollary: If any files are missing from the index part, they won't be included here.
763 109 : &["tenant_id", "shard_id", "timeline_id"]
764 : )
765 109 : .expect("failed to define a metric")
766 109 : });
767 :
768 109 : static REMOTE_PHYSICAL_SIZE_GLOBAL: Lazy<UIntGauge> = Lazy::new(|| {
769 109 : register_uint_gauge!(
770 : "pageserver_remote_physical_size_global",
771 : "Like `pageserver_remote_physical_size`, but without tenant/timeline dimensions."
772 : )
773 109 : .expect("failed to define a metric")
774 109 : });
775 :
776 3 : pub(crate) static REMOTE_ONDEMAND_DOWNLOADED_LAYERS: Lazy<IntCounter> = Lazy::new(|| {
777 3 : register_int_counter!(
778 : "pageserver_remote_ondemand_downloaded_layers_total",
779 : "Total on-demand downloaded layers"
780 : )
781 3 : .unwrap()
782 3 : });
783 :
784 3 : pub(crate) static REMOTE_ONDEMAND_DOWNLOADED_BYTES: Lazy<IntCounter> = Lazy::new(|| {
785 3 : register_int_counter!(
786 : "pageserver_remote_ondemand_downloaded_bytes_total",
787 : "Total bytes of layers on-demand downloaded",
788 : )
789 3 : .unwrap()
790 3 : });
791 :
792 109 : static CURRENT_LOGICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
793 109 : register_uint_gauge_vec!(
794 : "pageserver_current_logical_size",
795 : "Current logical size grouped by timeline",
796 109 : &["tenant_id", "shard_id", "timeline_id"]
797 : )
798 109 : .expect("failed to define current logical size metric")
799 109 : });
800 :
801 109 : static AUX_FILE_SIZE: Lazy<IntGaugeVec> = Lazy::new(|| {
802 109 : register_int_gauge_vec!(
803 : "pageserver_aux_file_estimated_size",
804 : "The size of all aux files for a timeline in aux file v2 store.",
805 109 : &["tenant_id", "shard_id", "timeline_id"]
806 : )
807 109 : .expect("failed to define a metric")
808 109 : });
809 :
810 109 : static VALID_LSN_LEASE_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
811 109 : register_uint_gauge_vec!(
812 : "pageserver_valid_lsn_lease_count",
813 : "The number of valid leases after refreshing gc info.",
814 109 : &["tenant_id", "shard_id", "timeline_id"],
815 : )
816 109 : .expect("failed to define a metric")
817 109 : });
818 :
819 0 : pub(crate) static CIRCUIT_BREAKERS_BROKEN: Lazy<IntCounter> = Lazy::new(|| {
820 0 : register_int_counter!(
821 : "pageserver_circuit_breaker_broken",
822 : "How many times a circuit breaker has broken"
823 : )
824 0 : .expect("failed to define a metric")
825 0 : });
826 :
827 0 : pub(crate) static CIRCUIT_BREAKERS_UNBROKEN: Lazy<IntCounter> = Lazy::new(|| {
828 0 : register_int_counter!(
829 : "pageserver_circuit_breaker_unbroken",
830 : "How many times a circuit breaker has been un-broken (recovered)"
831 : )
832 0 : .expect("failed to define a metric")
833 0 : });
834 :
835 105 : pub(crate) static COMPRESSION_IMAGE_INPUT_BYTES: Lazy<IntCounter> = Lazy::new(|| {
836 105 : register_int_counter!(
837 : "pageserver_compression_image_in_bytes_total",
838 : "Size of data written into image layers before compression"
839 : )
840 105 : .expect("failed to define a metric")
841 105 : });
842 :
843 105 : pub(crate) static COMPRESSION_IMAGE_INPUT_BYTES_CONSIDERED: Lazy<IntCounter> = Lazy::new(|| {
844 105 : register_int_counter!(
845 : "pageserver_compression_image_in_bytes_considered",
846 : "Size of potentially compressible data written into image layers before compression"
847 : )
848 105 : .expect("failed to define a metric")
849 105 : });
850 :
851 105 : pub(crate) static COMPRESSION_IMAGE_INPUT_BYTES_CHOSEN: Lazy<IntCounter> = Lazy::new(|| {
852 105 : register_int_counter!(
853 : "pageserver_compression_image_in_bytes_chosen",
854 : "Size of data whose compressed form was written into image layers"
855 : )
856 105 : .expect("failed to define a metric")
857 105 : });
858 :
859 105 : pub(crate) static COMPRESSION_IMAGE_OUTPUT_BYTES: Lazy<IntCounter> = Lazy::new(|| {
860 105 : register_int_counter!(
861 : "pageserver_compression_image_out_bytes_total",
862 : "Size of compressed image layer written"
863 : )
864 105 : .expect("failed to define a metric")
865 105 : });
866 :
867 5 : pub(crate) static RELSIZE_LATEST_CACHE_ENTRIES: Lazy<UIntGauge> = Lazy::new(|| {
868 5 : register_uint_gauge!(
869 : "pageserver_relsize_latest_cache_entries",
870 : "Number of entries in the latest relation size cache",
871 : )
872 5 : .expect("failed to define a metric")
873 5 : });
874 :
875 5 : pub(crate) static RELSIZE_LATEST_CACHE_HITS: Lazy<IntCounter> = Lazy::new(|| {
876 5 : register_int_counter!(
877 : "pageserver_relsize_latest_cache_hits",
878 : "Latest relation size cache hits",
879 : )
880 5 : .expect("failed to define a metric")
881 5 : });
882 :
883 4 : pub(crate) static RELSIZE_LATEST_CACHE_MISSES: Lazy<IntCounter> = Lazy::new(|| {
884 4 : register_int_counter!(
885 : "pageserver_relsize_latest_cache_misses",
886 : "Relation size latest cache misses",
887 : )
888 4 : .expect("failed to define a metric")
889 4 : });
890 :
891 2 : pub(crate) static RELSIZE_SNAPSHOT_CACHE_ENTRIES: Lazy<UIntGauge> = Lazy::new(|| {
892 2 : register_uint_gauge!(
893 : "pageserver_relsize_snapshot_cache_entries",
894 : "Number of entries in the pitr relation size cache",
895 : )
896 2 : .expect("failed to define a metric")
897 2 : });
898 :
899 2 : pub(crate) static RELSIZE_SNAPSHOT_CACHE_HITS: Lazy<IntCounter> = Lazy::new(|| {
900 2 : register_int_counter!(
901 : "pageserver_relsize_snapshot_cache_hits",
902 : "Pitr relation size cache hits",
903 : )
904 2 : .expect("failed to define a metric")
905 2 : });
906 :
907 3 : pub(crate) static RELSIZE_SNAPSHOT_CACHE_MISSES: Lazy<IntCounter> = Lazy::new(|| {
908 3 : register_int_counter!(
909 : "pageserver_relsize_snapshot_cache_misses",
910 : "Relation size snapshot cache misses",
911 : )
912 3 : .expect("failed to define a metric")
913 3 : });
914 :
915 2 : pub(crate) static RELSIZE_CACHE_MISSES_OLD: Lazy<IntCounter> = Lazy::new(|| {
916 2 : register_int_counter!(
917 : "pageserver_relsize_cache_misses_old",
918 : "Relation size cache misses where the lookup LSN is older than the last relation update"
919 : )
920 2 : .expect("failed to define a metric")
921 2 : });
922 :
923 : pub(crate) mod initial_logical_size {
924 : use metrics::{IntCounter, IntCounterVec, register_int_counter, register_int_counter_vec};
925 : use once_cell::sync::Lazy;
926 :
927 : pub(crate) struct StartCalculation(IntCounterVec);
928 109 : pub(crate) static START_CALCULATION: Lazy<StartCalculation> = Lazy::new(|| {
929 109 : StartCalculation(
930 109 : register_int_counter_vec!(
931 109 : "pageserver_initial_logical_size_start_calculation",
932 109 : "Incremented each time we start an initial logical size calculation attempt. \
933 109 : The `circumstances` label provides some additional details.",
934 109 : &["attempt", "circumstances"]
935 109 : )
936 109 : .unwrap(),
937 109 : )
938 109 : });
939 :
940 : struct DropCalculation {
941 : first: IntCounter,
942 : retry: IntCounter,
943 : }
944 :
945 109 : static DROP_CALCULATION: Lazy<DropCalculation> = Lazy::new(|| {
946 109 : let vec = register_int_counter_vec!(
947 : "pageserver_initial_logical_size_drop_calculation",
948 : "Incremented each time we abort a started size calculation attmpt.",
949 109 : &["attempt"]
950 : )
951 109 : .unwrap();
952 109 : DropCalculation {
953 109 : first: vec.with_label_values(&["first"]),
954 109 : retry: vec.with_label_values(&["retry"]),
955 109 : }
956 109 : });
957 :
958 : pub(crate) struct Calculated {
959 : pub(crate) births: IntCounter,
960 : pub(crate) deaths: IntCounter,
961 : }
962 :
963 : pub(crate) static CALCULATED: Lazy<Calculated> = Lazy::new(|| Calculated {
964 109 : births: register_int_counter!(
965 : "pageserver_initial_logical_size_finish_calculation",
966 : "Incremented every time we finish calculation of initial logical size.\
967 : If everything is working well, this should happen at most once per Timeline object."
968 : )
969 109 : .unwrap(),
970 109 : deaths: register_int_counter!(
971 : "pageserver_initial_logical_size_drop_finished_calculation",
972 : "Incremented when we drop a finished initial logical size calculation result.\
973 : Mainly useful to turn pageserver_initial_logical_size_finish_calculation into a gauge."
974 : )
975 109 : .unwrap(),
976 109 : });
977 :
978 : pub(crate) struct OngoingCalculationGuard {
979 : inc_drop_calculation: Option<IntCounter>,
980 : }
981 :
982 : #[derive(strum_macros::IntoStaticStr)]
983 : pub(crate) enum StartCircumstances {
984 : EmptyInitial,
985 : SkippedConcurrencyLimiter,
986 : AfterBackgroundTasksRateLimit,
987 : }
988 :
989 : impl StartCalculation {
990 115 : pub(crate) fn first(&self, circumstances: StartCircumstances) -> OngoingCalculationGuard {
991 115 : let circumstances_label: &'static str = circumstances.into();
992 115 : self.0
993 115 : .with_label_values(&["first", circumstances_label])
994 115 : .inc();
995 115 : OngoingCalculationGuard {
996 115 : inc_drop_calculation: Some(DROP_CALCULATION.first.clone()),
997 115 : }
998 115 : }
999 0 : pub(crate) fn retry(&self, circumstances: StartCircumstances) -> OngoingCalculationGuard {
1000 0 : let circumstances_label: &'static str = circumstances.into();
1001 0 : self.0
1002 0 : .with_label_values(&["retry", circumstances_label])
1003 0 : .inc();
1004 0 : OngoingCalculationGuard {
1005 0 : inc_drop_calculation: Some(DROP_CALCULATION.retry.clone()),
1006 0 : }
1007 0 : }
1008 : }
1009 :
1010 : impl Drop for OngoingCalculationGuard {
1011 115 : fn drop(&mut self) {
1012 115 : if let Some(counter) = self.inc_drop_calculation.take() {
1013 0 : counter.inc();
1014 115 : }
1015 115 : }
1016 : }
1017 :
1018 : impl OngoingCalculationGuard {
1019 115 : pub(crate) fn calculation_result_saved(mut self) -> FinishedCalculationGuard {
1020 115 : drop(self.inc_drop_calculation.take());
1021 115 : CALCULATED.births.inc();
1022 115 : FinishedCalculationGuard {
1023 115 : inc_on_drop: CALCULATED.deaths.clone(),
1024 115 : }
1025 115 : }
1026 : }
1027 :
1028 : pub(crate) struct FinishedCalculationGuard {
1029 : inc_on_drop: IntCounter,
1030 : }
1031 :
1032 : impl Drop for FinishedCalculationGuard {
1033 3 : fn drop(&mut self) {
1034 3 : self.inc_on_drop.inc();
1035 3 : }
1036 : }
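// Lifecycle sketch of the guards above (hypothetical call site): dropping the
// OngoingCalculationGuard before saving bumps the drop counter; converting it
// with calculation_result_saved() counts a birth, and dropping the returned
// FinishedCalculationGuard counts a death, so births minus deaths gauges the
// number of live results.
//
//     let guard = START_CALCULATION.first(StartCircumstances::EmptyInitial);
//     // ... compute the initial logical size ...
//     let finished = guard.calculation_result_saved();
//     // keep `finished` alive for as long as the result is retained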
1037 :
1038 : // context: https://github.com/neondatabase/neon/issues/5963
1039 : pub(crate) static TIMELINES_WHERE_WALRECEIVER_GOT_APPROXIMATE_SIZE: Lazy<IntCounter> =
1040 0 : Lazy::new(|| {
1041 0 : register_int_counter!(
1042 : "pageserver_initial_logical_size_timelines_where_walreceiver_got_approximate_size",
1043 : "Counter for the following event: walreceiver calls\
1044 : Timeline::get_current_logical_size() and it returns `Approximate` for the first time."
1045 : )
1046 0 : .unwrap()
1047 0 : });
1048 : }
1049 :
1050 0 : static DIRECTORY_ENTRIES_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
1051 0 : register_uint_gauge_vec!(
1052 : "pageserver_directory_entries_count",
1053 : "Sum of the entries in pageserver-stored directory listings",
1054 0 : &["tenant_id", "shard_id", "timeline_id"]
1055 : )
1056 0 : .expect("failed to define a metric")
1057 0 : });
1058 :
1059 110 : pub(crate) static TENANT_STATE_METRIC: Lazy<UIntGaugeVec> = Lazy::new(|| {
1060 110 : register_uint_gauge_vec!(
1061 : "pageserver_tenant_states_count",
1062 : "Count of tenants per state",
1063 110 : &["state"]
1064 : )
1065 110 : .expect("Failed to register pageserver_tenant_states_count metric")
1066 110 : });
1067 :
1068 109 : pub(crate) static TIMELINE_STATE_METRIC: Lazy<UIntGaugeVec> = Lazy::new(|| {
1069 109 : register_uint_gauge_vec!(
1070 : "pageserver_timeline_states_count",
1071 : "Count of timelines per state",
1072 109 : &["state"]
1073 : )
1074 109 : .expect("Failed to register pageserver_timeline_states_count metric")
1075 109 : });
1076 :
1077 : /// A set of broken tenants.
1078 : ///
1079 : /// These are expected to be so rare that a set is fine. Set as in a new timeseries for each broken
1080 : /// tenant.
1081 5 : pub(crate) static BROKEN_TENANTS_SET: Lazy<UIntGaugeVec> = Lazy::new(|| {
1082 5 : register_uint_gauge_vec!(
1083 : "pageserver_broken_tenants_count",
1084 : "Set of broken tenants",
1085 5 : &["tenant_id", "shard_id"]
1086 : )
1087 5 : .expect("Failed to register pageserver_tenant_states_count metric")
1088 5 : });
1089 :
1090 3 : pub(crate) static TENANT_SYNTHETIC_SIZE_METRIC: Lazy<UIntGaugeVec> = Lazy::new(|| {
1091 3 : register_uint_gauge_vec!(
1092 : "pageserver_tenant_synthetic_cached_size_bytes",
1093 : "Synthetic size of each tenant in bytes",
1094 3 : &["tenant_id"]
1095 : )
1096 3 : .expect("Failed to register pageserver_tenant_synthetic_cached_size_bytes metric")
1097 3 : });
1098 :
1099 110 : pub(crate) static TENANT_OFFLOADED_TIMELINES: Lazy<UIntGaugeVec> = Lazy::new(|| {
1100 110 : register_uint_gauge_vec!(
1101 : "pageserver_tenant_offloaded_timelines",
1102 : "Number of offloaded timelines of a tenant",
1103 110 : &["tenant_id", "shard_id"]
1104 : )
1105 110 : .expect("Failed to register pageserver_tenant_offloaded_timelines metric")
1106 110 : });
1107 :
1108 0 : pub(crate) static EVICTION_ITERATION_DURATION: Lazy<HistogramVec> = Lazy::new(|| {
1109 0 : register_histogram_vec!(
1110 : "pageserver_eviction_iteration_duration_seconds_global",
1111 : "Time spent on a single eviction iteration",
1112 0 : &["period_secs", "threshold_secs"],
1113 0 : STORAGE_OP_BUCKETS.into(),
1114 : )
1115 0 : .expect("failed to define a metric")
1116 0 : });
1117 :
1118 109 : static EVICTIONS: Lazy<IntCounterVec> = Lazy::new(|| {
1119 109 : register_int_counter_vec!(
1120 : "pageserver_evictions",
1121 : "Number of layers evicted from the pageserver",
1122 109 : &["tenant_id", "shard_id", "timeline_id"]
1123 : )
1124 109 : .expect("failed to define a metric")
1125 109 : });
1126 :
1127 109 : static EVICTIONS_WITH_LOW_RESIDENCE_DURATION: Lazy<IntCounterVec> = Lazy::new(|| {
1128 109 : register_int_counter_vec!(
1129 : "pageserver_evictions_with_low_residence_duration",
1130 : "If a layer is evicted that was resident for less than `low_threshold`, it is counted to this counter. \
1131 : Residence duration is determined using the `residence_duration_data_source`.",
1132 109 : &["tenant_id", "shard_id", "timeline_id", "residence_duration_data_source", "low_threshold_secs"]
1133 : )
1134 109 : .expect("failed to define a metric")
1135 109 : });
1136 :
1137 0 : pub(crate) static UNEXPECTED_ONDEMAND_DOWNLOADS: Lazy<IntCounter> = Lazy::new(|| {
1138 0 : register_int_counter!(
1139 : "pageserver_unexpected_ondemand_downloads_count",
1140 : "Number of unexpected on-demand downloads. \
1141 : We log more context for each increment, so, forgo any labels in this metric.",
1142 : )
1143 0 : .expect("failed to define a metric")
1144 0 : });
1145 :
1146 : /// How long did we take to start up? Broken down by labels to describe
1147 : /// different phases of startup.
1148 0 : pub static STARTUP_DURATION: Lazy<GaugeVec> = Lazy::new(|| {
1149 0 : register_gauge_vec!(
1150 : "pageserver_startup_duration_seconds",
1151 : "Time taken by phases of pageserver startup, in seconds",
1152 0 : &["phase"]
1153 : )
1154 0 : .expect("Failed to register pageserver_startup_duration_seconds metric")
1155 0 : });
1156 :
1157 0 : pub static STARTUP_IS_LOADING: Lazy<UIntGauge> = Lazy::new(|| {
1158 0 : register_uint_gauge!(
1159 : "pageserver_startup_is_loading",
1160 : "1 while in initial startup load of tenants, 0 at other times"
1161 : )
1162 0 : .expect("Failed to register pageserver_startup_is_loading")
1163 0 : });
1164 :
1165 111 : pub(crate) static TIMELINE_EPHEMERAL_BYTES: Lazy<UIntGauge> = Lazy::new(|| {
1166 111 : register_uint_gauge!(
1167 : "pageserver_timeline_ephemeral_bytes",
1168 : "Total number of bytes in ephemeral layers, summed for all timelines. Approximate, lazily updated."
1169 : )
1170 111 : .expect("Failed to register metric")
1171 111 : });
1172 :
1173 : /// Metrics related to the lifecycle of a [`crate::tenant::TenantShard`] object: things
1174 : /// like how long it took to load.
1175 : ///
1176 : /// Note that these are process-global metrics, _not_ per-tenant metrics. Per-tenant
1177 : /// metrics are rather expensive, and usually fine-grained stuff makes more sense
1178 : /// at a timeline level than tenant level.
1179 : pub(crate) struct TenantMetrics {
1180 : /// How long did tenants take to go from construction to active state?
1181 : pub(crate) activation: Histogram,
1182 : pub(crate) preload: Histogram,
1183 : pub(crate) attach: Histogram,
1184 :
1185 : /// How many tenants are included in the initial startup of the pageserver?
1186 : pub(crate) startup_scheduled: IntCounter,
1187 : pub(crate) startup_complete: IntCounter,
1188 : }
1189 :
1190 0 : pub(crate) static TENANT: Lazy<TenantMetrics> = Lazy::new(|| {
1191 0 : TenantMetrics {
1192 0 : activation: register_histogram!(
1193 0 : "pageserver_tenant_activation_seconds",
1194 0 : "Time taken by tenants to activate, in seconds",
1195 0 : CRITICAL_OP_BUCKETS.into()
1196 0 : )
1197 0 : .expect("Failed to register metric"),
1198 0 : preload: register_histogram!(
1199 0 : "pageserver_tenant_preload_seconds",
1200 0 : "Time taken by tenants to load remote metadata on startup/attach, in seconds",
1201 0 : CRITICAL_OP_BUCKETS.into()
1202 0 : )
1203 0 : .expect("Failed to register metric"),
1204 0 : attach: register_histogram!(
1205 0 : "pageserver_tenant_attach_seconds",
1206 0 : "Time taken by tenants to intialize, after remote metadata is already loaded",
1207 0 : CRITICAL_OP_BUCKETS.into()
1208 0 : )
1209 0 : .expect("Failed to register metric"),
1210 0 : startup_scheduled: register_int_counter!(
1211 0 : "pageserver_tenant_startup_scheduled",
1212 0 : "Number of tenants included in pageserver startup (doesn't count tenants attached later)"
1213 0 : ).expect("Failed to register metric"),
1214 0 : startup_complete: register_int_counter!(
1215 0 : "pageserver_tenant_startup_complete",
1216 0 : "Number of tenants that have completed warm-up, or activated on-demand during initial startup: \
1217 0 : should eventually reach `pageserver_tenant_startup_scheduled_total`. Does not include broken \
1218 0 : tenants: such cases will lead to this metric never reaching the scheduled count."
1219 0 : ).expect("Failed to register metric"),
1220 0 : }
1221 0 : });
1222 :
1223 : /// Each `Timeline`'s [`EVICTIONS_WITH_LOW_RESIDENCE_DURATION`] metric.
1224 : #[derive(Debug)]
1225 : pub(crate) struct EvictionsWithLowResidenceDuration {
1226 : data_source: &'static str,
1227 : threshold: Duration,
1228 : counter: Option<IntCounter>,
1229 : }
1230 :
1231 : pub(crate) struct EvictionsWithLowResidenceDurationBuilder {
1232 : data_source: &'static str,
1233 : threshold: Duration,
1234 : }
1235 :
1236 : impl EvictionsWithLowResidenceDurationBuilder {
1237 235 : pub fn new(data_source: &'static str, threshold: Duration) -> Self {
1238 235 : Self {
1239 235 : data_source,
1240 235 : threshold,
1241 235 : }
1242 235 : }
1243 :
1244 235 : fn build(
1245 235 : &self,
1246 235 : tenant_id: &str,
1247 235 : shard_id: &str,
1248 235 : timeline_id: &str,
1249 235 : ) -> EvictionsWithLowResidenceDuration {
1250 235 : let counter = EVICTIONS_WITH_LOW_RESIDENCE_DURATION
1251 235 : .get_metric_with_label_values(&[
1252 235 : tenant_id,
1253 235 : shard_id,
1254 235 : timeline_id,
1255 235 : self.data_source,
1256 235 : &EvictionsWithLowResidenceDuration::threshold_label_value(self.threshold),
1257 235 : ])
1258 235 : .unwrap();
1259 235 : EvictionsWithLowResidenceDuration {
1260 235 : data_source: self.data_source,
1261 235 : threshold: self.threshold,
1262 235 : counter: Some(counter),
1263 235 : }
1264 235 : }
1265 : }
1266 :
1267 : impl EvictionsWithLowResidenceDuration {
1268 240 : fn threshold_label_value(threshold: Duration) -> String {
1269 240 : format!("{}", threshold.as_secs())
1270 240 : }
1271 :
1272 2 : pub fn observe(&self, observed_value: Duration) {
1273 2 : if observed_value < self.threshold {
1274 2 : self.counter
1275 2 : .as_ref()
1276 2 : .expect("nobody calls this function after `remove_from_vec`")
1277 2 : .inc();
1278 2 : }
1279 2 : }
1280 :
1281 0 : pub fn change_threshold(
1282 0 : &mut self,
1283 0 : tenant_id: &str,
1284 0 : shard_id: &str,
1285 0 : timeline_id: &str,
1286 0 : new_threshold: Duration,
1287 0 : ) {
1288 0 : if new_threshold == self.threshold {
1289 0 : return;
1290 0 : }
1291 0 : let mut with_new = EvictionsWithLowResidenceDurationBuilder::new(
1292 0 : self.data_source,
1293 0 : new_threshold,
1294 0 : )
1295 0 : .build(tenant_id, shard_id, timeline_id);
1296 0 : std::mem::swap(self, &mut with_new);
1297 0 : with_new.remove(tenant_id, shard_id, timeline_id);
1298 0 : }
1299 :
1300 : // This could be a `Drop` impl, but we need the `tenant_id` and `timeline_id`.
1301 5 : fn remove(&mut self, tenant_id: &str, shard_id: &str, timeline_id: &str) {
1302 5 : let Some(_counter) = self.counter.take() else {
1303 0 : return;
1304 : };
1305 :
1306 5 : let threshold = Self::threshold_label_value(self.threshold);
1307 :
1308 5 : let removed = EVICTIONS_WITH_LOW_RESIDENCE_DURATION.remove_label_values(&[
1309 5 : tenant_id,
1310 5 : shard_id,
1311 5 : timeline_id,
1312 5 : self.data_source,
1313 5 : &threshold,
1314 5 : ]);
1315 :
1316 5 : match removed {
1317 0 : Err(e) => {
1318 : // this has been hit in staging as
1319 : // <https://neondatabase.sentry.io/issues/4142396994/>, but we don't know how.
1320 : // because we can be in the drop path already, don't risk:
1321 : // - "double-panic => illegal instruction" or
1322 : // - future "drop panick => abort"
1323 : //
1324 : // so just nag: (the error has the labels)
1325 0 : tracing::warn!(
1326 0 : "failed to remove EvictionsWithLowResidenceDuration, it was already removed? {e:#?}"
1327 : );
1328 : }
1329 : Ok(()) => {
1330 : // to help identify cases where we double-remove the same values, let's log all
1331 : // deletions?
1332 5 : tracing::info!(
1333 0 : "removed EvictionsWithLowResidenceDuration with {tenant_id}, {timeline_id}, {}, {threshold}",
1334 : self.data_source
1335 : );
1336 : }
1337 : }
1338 5 : }
1339 : }
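// Usage sketch (hypothetical, same-module caller; `tenant_id`, `shard_id`, and
// `timeline_id` are assumed label strings): because the threshold is part of
// the label set, change_threshold() registers a fresh timeseries and removes
// the old one.
//
//     let mut m =
//         EvictionsWithLowResidenceDurationBuilder::new("residence", Duration::from_secs(600))
//             .build(tenant_id, shard_id, timeline_id);
//     m.observe(Duration::from_secs(30)); // below threshold: increments the counter
//     m.change_threshold(tenant_id, shard_id, timeline_id, Duration::from_secs(1200));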
1340 :
1341 : // Metrics collected on disk IO operations
1342 : //
1343 : // Roughly logarithmic scale.
1344 : const STORAGE_IO_TIME_BUCKETS: &[f64] = &[
1345 : 0.00005, // 50us
1346 : 0.00006, // 60us
1347 : 0.00007, // 70us
1348 : 0.00008, // 80us
1349 : 0.00009, // 90us
1350 : 0.0001, // 100us
1351 : 0.000110, // 110us
1352 : 0.000120, // 120us
1353 : 0.000130, // 130us
1354 : 0.000140, // 140us
1355 : 0.000150, // 150us
1356 : 0.000160, // 160us
1357 : 0.000170, // 170us
1358 : 0.000180, // 180us
1359 : 0.000190, // 190us
1360 : 0.000200, // 200us
1361 : 0.000210, // 210us
1362 : 0.000220, // 220us
1363 : 0.000230, // 230us
1364 : 0.000240, // 240us
1365 : 0.000250, // 250us
1366 : 0.000300, // 300us
1367 : 0.000350, // 350us
1368 : 0.000400, // 400us
1369 : 0.000450, // 450us
1370 : 0.000500, // 500us
1371 : 0.000600, // 600us
1372 : 0.000700, // 700us
1373 : 0.000800, // 800us
1374 : 0.000900, // 900us
1375 : 0.001000, // 1ms
1376 : 0.002000, // 2ms
1377 : 0.003000, // 3ms
1378 : 0.004000, // 4ms
1379 : 0.005000, // 5ms
1380 : 0.01000, // 10ms
1381 : 0.02000, // 20ms
1382 : 0.05000, // 50ms
1383 : ];
1384 :
1385 : /// VirtualFile fs operation variants.
1386 : ///
1387 : /// Operations:
1388 : /// - open ([`std::fs::OpenOptions::open`])
1389 : /// - close (dropping [`crate::virtual_file::VirtualFile`])
1390 : /// - close-by-replace (close by replacement algorithm)
1391 : /// - read (`read_at`)
1392 : /// - write (`write_at`)
1393 : /// - seek (modify internal position or file length query)
1394 : /// - fsync ([`std::fs::File::sync_all`])
1395 : /// - metadata ([`std::fs::File::metadata`])
1396 : #[derive(
1397 : Debug, Clone, Copy, strum_macros::EnumCount, strum_macros::EnumIter, strum_macros::FromRepr,
1398 : )]
1399 : pub(crate) enum StorageIoOperation {
1400 : Open,
1401 : OpenAfterReplace,
1402 : Close,
1403 : CloseByReplace,
1404 : Read,
1405 : Write,
1406 : Seek,
1407 : Fsync,
1408 : Metadata,
1409 : SetLen,
1410 : }
1411 :
1412 : impl StorageIoOperation {
1413 1220 : pub fn as_str(&self) -> &'static str {
1414 1220 : match self {
1415 122 : StorageIoOperation::Open => "open",
1416 122 : StorageIoOperation::OpenAfterReplace => "open-after-replace",
1417 122 : StorageIoOperation::Close => "close",
1418 122 : StorageIoOperation::CloseByReplace => "close-by-replace",
1419 122 : StorageIoOperation::Read => "read",
1420 122 : StorageIoOperation::Write => "write",
1421 122 : StorageIoOperation::Seek => "seek",
1422 122 : StorageIoOperation::Fsync => "fsync",
1423 122 : StorageIoOperation::Metadata => "metadata",
1424 122 : StorageIoOperation::SetLen => "set_len",
1425 : }
1426 1220 : }
1427 : }
1428 :
1429 : /// Tracks time taken by fs operations near VirtualFile.
1430 : #[derive(Debug)]
1431 : pub(crate) struct StorageIoTime {
1432 : metrics: [Histogram; StorageIoOperation::COUNT],
1433 : }
1434 :
1435 : impl StorageIoTime {
1436 122 : fn new() -> Self {
1437 122 : let storage_io_histogram_vec = register_histogram_vec!(
1438 : "pageserver_io_operations_seconds",
1439 : "Time spent in IO operations",
1440 122 : &["operation"],
1441 122 : STORAGE_IO_TIME_BUCKETS.into()
1442 : )
1443 122 : .expect("failed to define a metric");
1444 1220 : let metrics = std::array::from_fn(|i| {
1445 1220 : let op = StorageIoOperation::from_repr(i).unwrap();
1446 1220 : storage_io_histogram_vec
1447 1220 : .get_metric_with_label_values(&[op.as_str()])
1448 1220 : .unwrap()
1449 1220 : });
1450 122 : Self { metrics }
1451 122 : }
1452 :
1453 498667 : pub(crate) fn get(&self, op: StorageIoOperation) -> &Histogram {
1454 498667 : &self.metrics[op as usize]
1455 498667 : }
1456 : }
1457 :
1458 : pub(crate) static STORAGE_IO_TIME_METRIC: Lazy<StorageIoTime> = Lazy::new(StorageIoTime::new);
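// Usage sketch: the enum discriminant indexes directly into the histogram
// array, so the I/O hot path avoids any label lookup.
//
//     let started = Instant::now();
//     // ... perform the read ...
//     STORAGE_IO_TIME_METRIC
//         .get(StorageIoOperation::Read)
//         .observe(started.elapsed().as_secs_f64());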
1459 :
1460 : #[derive(Clone, Copy)]
1461 : #[repr(usize)]
1462 : pub(crate) enum StorageIoSizeOperation {
1463 : Read,
1464 : Write,
1465 : }
1466 :
1467 : impl StorageIoSizeOperation {
1468 : pub(crate) const VARIANTS: &'static [&'static str] = &["read", "write"];
1469 :
1470 756 : fn as_str(&self) -> &'static str {
1471 756 : Self::VARIANTS[*self as usize]
1472 756 : }
1473 : }
1474 :
1475 : // Needed for the https://neonprod.grafana.net/d/5uK9tHL4k/picking-tenant-for-relocation?orgId=1
1476 143 : pub(crate) static STORAGE_IO_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
1477 143 : register_uint_gauge_vec!(
1478 : "pageserver_io_operations_bytes_total",
1479 : "Total amount of bytes read/written in IO operations",
1480 143 : &["operation", "tenant_id", "shard_id", "timeline_id"]
1481 : )
1482 143 : .expect("failed to define a metric")
1483 143 : });
1484 :
1485 : #[derive(Clone, Debug)]
1486 : pub(crate) struct StorageIoSizeMetrics {
1487 : pub read: UIntGauge,
1488 : pub write: UIntGauge,
1489 : }
1490 :
1491 : impl StorageIoSizeMetrics {
1492 378 : pub(crate) fn new(tenant_id: &str, shard_id: &str, timeline_id: &str) -> Self {
1493 378 : let read = STORAGE_IO_SIZE
1494 378 : .get_metric_with_label_values(&[
1495 378 : StorageIoSizeOperation::Read.as_str(),
1496 378 : tenant_id,
1497 378 : shard_id,
1498 378 : timeline_id,
1499 378 : ])
1500 378 : .unwrap();
1501 378 : let write = STORAGE_IO_SIZE
1502 378 : .get_metric_with_label_values(&[
1503 378 : StorageIoSizeOperation::Write.as_str(),
1504 378 : tenant_id,
1505 378 : shard_id,
1506 378 : timeline_id,
1507 378 : ])
1508 378 : .unwrap();
1509 378 : Self { read, write }
1510 378 : }
1511 : }
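// Usage sketch (hypothetical call site; label strings and byte counts are
// assumed): the two gauges are resolved once in new() and then bumped by the
// byte count of each I/O.
//
//     let sizes = StorageIoSizeMetrics::new(tenant_id, shard_id, timeline_id);
//     sizes.read.add(bytes_read);
//     sizes.write.add(bytes_written);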
1512 :
1513 : #[cfg(not(test))]
1514 : pub(crate) mod virtual_file_descriptor_cache {
1515 : use super::*;
1516 :
1517 0 : pub(crate) static SIZE_MAX: Lazy<UIntGauge> = Lazy::new(|| {
1518 0 : register_uint_gauge!(
1519 : "pageserver_virtual_file_descriptor_cache_size_max",
1520 : "Maximum number of open file descriptors in the cache."
1521 : )
1522 0 : .unwrap()
1523 0 : });
1524 :
1525 : // SIZE_CURRENT: derive it like so:
1526 : // ```
1527 : // sum(pageserver_io_operations_seconds_count{operation=~"^(open|open-after-replace)$"})
1528 : // - ignoring(operation)
1529 : // sum(pageserver_io_operations_seconds_count{operation=~"^(close|close-by-replace)$"})
1530 : // ```
1531 : }
1532 :
1533 : #[cfg(not(test))]
1534 : pub(crate) mod virtual_file_io_engine {
1535 : use super::*;
1536 :
1537 0 : pub(crate) static KIND: Lazy<UIntGaugeVec> = Lazy::new(|| {
1538 0 : register_uint_gauge_vec!(
1539 : "pageserver_virtual_file_io_engine_kind",
1540 : "The configured io engine for VirtualFile",
1541 0 : &["kind"],
1542 : )
1543 0 : .unwrap()
1544 0 : });
1545 : }
1546 :
1547 : pub(crate) struct SmgrOpTimer(Option<SmgrOpTimerInner>);
1548 : pub(crate) struct SmgrOpTimerInner {
1549 : global_execution_latency_histo: Histogram,
1550 : per_timeline_execution_latency_histo: Option<Histogram>,
1551 :
1552 : global_batch_wait_time: Histogram,
1553 : per_timeline_batch_wait_time: Histogram,
1554 :
1555 : global_flush_in_progress_micros: IntCounter,
1556 : per_timeline_flush_in_progress_micros: IntCounter,
1557 :
1558 : throttling: Arc<tenant_throttling::Pagestream>,
1559 :
1560 : timings: SmgrOpTimerState,
1561 : }
1562 :
1563 : /// The stages of request processing are represented by the enum variants.
1564 : /// Used as part of [`SmgrOpTimerInner::timings`].
1565 : ///
1566 : /// Request processing calls into the `SmgrOpTimer::observe_*` methods at the
1567 : /// transition points.
1568 : /// These methods bump relevant counters and then update [`SmgrOpTimerInner::timings`]
1569 : /// to the next state.
1570 : ///
1571 : /// Each request goes through every stage, in all configurations.
1572 : ///
1573 : #[derive(Debug)]
1574 : enum SmgrOpTimerState {
1575 : Received {
1576 : // In the future, we may want to track the full time the request spent
1577 : // inside pageserver process (time spent in kernel buffers can't be tracked).
1578 : // `received_at` would be used for that.
1579 : #[allow(dead_code)]
1580 : received_at: Instant,
1581 : },
1582 : Throttling {
1583 : throttle_started_at: Instant,
1584 : },
1585 : Batching {
1586 : throttle_done_at: Instant,
1587 : },
1588 : Executing {
1589 : execution_started_at: Instant,
1590 : },
1591 : Flushing,
1592 : // NB: when adding observation points, remember to update the Drop impl.
1593 : }
1594 :
1595 : // NB: when adding observation points, remember to update the Drop impl.
1596 : impl SmgrOpTimer {
1597 : /// See [`SmgrOpTimerState`] for more context.
1598 0 : pub(crate) fn observe_throttle_start(&mut self, at: Instant) {
1599 0 : let Some(inner) = self.0.as_mut() else {
1600 0 : return;
1601 : };
1602 0 : let SmgrOpTimerState::Received { received_at: _ } = &mut inner.timings else {
1603 0 : return;
1604 : };
1605 0 : inner.throttling.count_accounted_start.inc();
1606 0 : inner.timings = SmgrOpTimerState::Throttling {
1607 0 : throttle_started_at: at,
1608 0 : };
1609 0 : }
1610 :
1611 : /// See [`SmgrOpTimerState`] for more context.
1612 0 : pub(crate) fn observe_throttle_done(&mut self, throttle: ThrottleResult) {
1613 0 : let Some(inner) = self.0.as_mut() else {
1614 0 : return;
1615 : };
1616 : let SmgrOpTimerState::Throttling {
1617 0 : throttle_started_at,
1618 0 : } = &inner.timings
1619 : else {
1620 0 : return;
1621 : };
1622 0 : inner.throttling.count_accounted_finish.inc();
1623 0 : match throttle {
1624 0 : ThrottleResult::NotThrottled { end } => {
1625 0 : inner.timings = SmgrOpTimerState::Batching {
1626 0 : throttle_done_at: end,
1627 0 : };
1628 0 : }
1629 0 : ThrottleResult::Throttled { end } => {
1630 0 : // update metrics
1631 0 : inner.throttling.count_throttled.inc();
1632 0 : inner
1633 0 : .throttling
1634 0 : .wait_time
1635 0 : .inc_by((end - *throttle_started_at).as_micros().try_into().unwrap());
1636 0 : // state transition
1637 0 : inner.timings = SmgrOpTimerState::Batching {
1638 0 : throttle_done_at: end,
1639 0 : };
1640 0 : }
1641 : }
1642 0 : }
1643 :
1644 : /// See [`SmgrOpTimerState`] for more context.
1645 0 : pub(crate) fn observe_execution_start(&mut self, at: Instant) {
1646 0 : let Some(inner) = self.0.as_mut() else {
1647 0 : return;
1648 : };
1649 0 : let SmgrOpTimerState::Batching { throttle_done_at } = &inner.timings else {
1650 0 : return;
1651 : };
1652 : // update metrics
1653 0 : let batch = at - *throttle_done_at;
1654 0 : inner.global_batch_wait_time.observe(batch.as_secs_f64());
1655 0 : inner
1656 0 : .per_timeline_batch_wait_time
1657 0 : .observe(batch.as_secs_f64());
1658 : // state transition
1659 0 : inner.timings = SmgrOpTimerState::Executing {
1660 0 : execution_started_at: at,
1661 0 : }
1662 0 : }
1663 :
1664 : /// For all but the first caller, this is a no-op.
1665 : /// The first caller receives `Some`; subsequent callers receive `None`.
1666 : ///
1667 : /// See [`SmgrOpTimerState`] for more context.
1668 0 : pub(crate) fn observe_execution_end(&mut self, at: Instant) -> Option<SmgrOpFlushInProgress> {
1669 : // NB: unlike the other observe_* methods, this one take()s.
1670 : #[allow(clippy::question_mark)] // maintain similar code pattern.
1671 0 : let Some(mut inner) = self.0.take() else {
1672 0 : return None;
1673 : };
1674 : let SmgrOpTimerState::Executing {
1675 0 : execution_started_at,
1676 0 : } = &inner.timings
1677 : else {
1678 0 : return None;
1679 : };
1680 : // update metrics
1681 0 : let execution = at - *execution_started_at;
1682 0 : inner
1683 0 : .global_execution_latency_histo
1684 0 : .observe(execution.as_secs_f64());
1685 0 : if let Some(per_timeline_execution_latency_histo) =
1686 0 : &inner.per_timeline_execution_latency_histo
1687 0 : {
1688 0 : per_timeline_execution_latency_histo.observe(execution.as_secs_f64());
1689 0 : }
1690 :
1691 : // state transition
1692 0 : inner.timings = SmgrOpTimerState::Flushing;
1693 :
1694 : // return the flush in progress object which
1695 : // will do the remaining metrics updates
1696 : let SmgrOpTimerInner {
1697 0 : global_flush_in_progress_micros,
1698 0 : per_timeline_flush_in_progress_micros,
1699 : ..
1700 0 : } = inner;
1701 0 : Some(SmgrOpFlushInProgress {
1702 0 : global_micros: global_flush_in_progress_micros,
1703 0 : per_timeline_micros: per_timeline_flush_in_progress_micros,
1704 0 : })
1705 0 : }
1706 : }
1707 :
1708 : /// The last stage of request processing is serializing and flushing the request
1709 : /// into the TCP connection. We want to make slow flushes observable
1710 : /// _while they are occurring_, so this struct provides a wrapper method [`Self::measure`]
1711 : /// to periodically bump the metric.
1712 : ///
1713 : /// If in the future we decide that we're not interested in live updates, we can
1714 : /// add another `observe_*` method to [`SmgrOpTimer`], follow the existing pattern there,
1715 : /// and remove this struct from the code base.
1716 : pub(crate) struct SmgrOpFlushInProgress {
1717 : global_micros: IntCounter,
1718 : per_timeline_micros: IntCounter,
1719 : }
1720 :
1721 : impl Drop for SmgrOpTimer {
1722 0 : fn drop(&mut self) {
1723 : // In case of early drop, update any of the remaining metrics with
1724 : // observations so that (started,finished) counter pairs balance out
1725 : // and all counters on the latency path have the same number of
1726 : // observations.
1727 : // It's technically lying and it would be better if each metric had
1728 : // a separate label or similar for cancelled requests.
1729 : // But we don't have that right now and counter pairs balancing
1730 : // out is useful when using the metrics in panels and whatnot.
1731 0 : let now = Instant::now();
1732 0 : self.observe_throttle_start(now);
1733 0 : self.observe_throttle_done(ThrottleResult::NotThrottled { end: now });
1734 0 : self.observe_execution_start(now);
1735 0 : let maybe_flush_timer = self.observe_execution_end(now);
1736 0 : drop(maybe_flush_timer);
1737 0 : }
1738 : }
1739 :
1740 : impl SmgrOpFlushInProgress {
1741 : /// The caller must guarantee that `socket_fd` outlives this function.
1742 0 : pub(crate) async fn measure<Fut, O>(self, started_at: Instant, fut: Fut, socket_fd: RawFd) -> O
1743 0 : where
1744 0 : Fut: std::future::Future<Output = O>,
1745 0 : {
1746 0 : let mut fut = std::pin::pin!(fut);
1747 :
1748 0 : let mut logged = false;
1749 0 : let mut last_counter_increment_at = started_at;
1750 0 : let mut observe_guard = scopeguard::guard(
1751 0 : |is_timeout| {
1752 0 : let now = Instant::now();
1753 :
1754 : // Increment counter
1755 0 : {
1756 0 : let elapsed_since_last_observe = now - last_counter_increment_at;
1757 0 : self.global_micros
1758 0 : .inc_by(u64::try_from(elapsed_since_last_observe.as_micros()).unwrap());
1759 0 : self.per_timeline_micros
1760 0 : .inc_by(u64::try_from(elapsed_since_last_observe.as_micros()).unwrap());
1761 0 : last_counter_increment_at = now;
1762 0 : }
1763 :
1764 : // Log on every timeout; on completion, log only if we hit at least one timeout.
1765 0 : if is_timeout || logged {
1766 0 : logged = true;
1767 0 : let elapsed_total = now - started_at;
1768 0 : let msg = if is_timeout {
1769 0 : "slow flush ongoing"
1770 : } else {
1771 0 : "slow flush completed or cancelled"
1772 : };
1773 :
1774 0 : let (inq, outq) = {
1775 0 : // SAFETY: caller guarantees that `socket_fd` outlives this function.
1776 0 : #[cfg(target_os = "linux")]
1777 0 : unsafe {
1778 0 : (
1779 0 : utils::linux_socket_ioctl::inq(socket_fd).unwrap_or(-2),
1780 0 : utils::linux_socket_ioctl::outq(socket_fd).unwrap_or(-2),
1781 0 : )
1782 0 : }
1783 0 : #[cfg(not(target_os = "linux"))]
1784 0 : {
1785 0 : _ = socket_fd; // appease unused lint on macOS
1786 0 : (-1, -1)
1787 0 : }
1788 0 : };
1789 :
1790 0 : let elapsed_total_secs = format!("{:.6}", elapsed_total.as_secs_f64());
1791 0 : tracing::info!(elapsed_total_secs, inq, outq, msg);
1792 0 : }
1793 0 : },
1794 0 : |mut observe| {
1795 0 : observe(false);
1796 0 : },
1797 : );
1798 :
1799 : loop {
1800 0 : match tokio::time::timeout(Duration::from_secs(10), &mut fut).await {
1801 0 : Ok(v) => return v,
1802 0 : Err(_timeout) => {
1803 0 : (*observe_guard)(true);
1804 0 : }
1805 : }
1806 : }
1807 0 : }
1808 : }
1809 :
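     : // How the last two stages compose (a sketch; `write_half` and `timer`
     : // are hypothetical): wrap the flush future so the in-progress counters
     : // keep advancing while a slow flush is still running.
     : //
     : //     if let Some(flushing) = timer.observe_execution_end(Instant::now()) {
     : //         flushing.measure(Instant::now(), write_half.flush(), socket_fd).await;
     : //     }
     :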
1810 : #[derive(
1811 : Debug,
1812 : Clone,
1813 : Copy,
1814 : IntoStaticStr,
1815 : strum_macros::EnumCount,
1816 : strum_macros::EnumIter,
1817 : strum_macros::FromRepr,
1818 : enum_map::Enum,
1819 : )]
1820 : #[strum(serialize_all = "snake_case")]
1821 : pub enum SmgrQueryType {
1822 : GetRelExists,
1823 : GetRelSize,
1824 : GetPageAtLsn,
1825 : GetDbSize,
1826 : GetSlruSegment,
1827 : #[cfg(feature = "testing")]
1828 : Test,
1829 : }
1830 :
1831 : #[derive(
1832 : Debug,
1833 : Clone,
1834 : Copy,
1835 : IntoStaticStr,
1836 : strum_macros::EnumCount,
1837 : strum_macros::EnumIter,
1838 : strum_macros::FromRepr,
1839 : enum_map::Enum,
1840 : )]
1841 : #[strum(serialize_all = "snake_case")]
1842 : pub enum GetPageBatchBreakReason {
1843 : BatchFull,
1844 : NonBatchableRequest,
1845 : NonUniformLsn,
1846 : SamePageAtDifferentLsn,
1847 : NonUniformTimeline,
1848 : ExecutorSteal,
1849 : #[cfg(feature = "testing")]
1850 : NonUniformKey,
1851 : }
1852 :
1853 : pub(crate) struct SmgrQueryTimePerTimeline {
1854 : global_started: [IntCounter; SmgrQueryType::COUNT],
1855 : global_latency: [Histogram; SmgrQueryType::COUNT],
1856 : per_timeline_getpage_started: IntCounter,
1857 : per_timeline_getpage_latency: Histogram,
1858 : global_batch_size: Histogram,
1859 : per_timeline_batch_size: Histogram,
1860 : global_flush_in_progress_micros: IntCounter,
1861 : per_timeline_flush_in_progress_micros: IntCounter,
1862 : global_batch_wait_time: Histogram,
1863 : per_timeline_batch_wait_time: Histogram,
1864 : global_batch_break_reason: [IntCounter; GetPageBatchBreakReason::COUNT],
1865 : per_timeline_batch_break_reason: GetPageBatchBreakReasonTimelineMetrics,
1866 : throttling: Arc<tenant_throttling::Pagestream>,
1867 : }
1868 :
1869 109 : static SMGR_QUERY_STARTED_GLOBAL: Lazy<IntCounterVec> = Lazy::new(|| {
1870 109 : register_int_counter_vec!(
1871 : // it's a counter, but, name is prepared to extend it to a histogram of queue depth
1872 : "pageserver_smgr_query_started_global_count",
1873 : "Number of smgr queries started, aggregated by query type.",
1874 109 : &["smgr_query_type"],
1875 : )
1876 109 : .expect("failed to define a metric")
1877 109 : });
1878 :
1879 109 : static SMGR_QUERY_STARTED_PER_TENANT_TIMELINE: Lazy<IntCounterVec> = Lazy::new(|| {
1880 109 : register_int_counter_vec!(
1881 : // it's a counter, but, name is prepared to extend it to a histogram of queue depth
1882 : "pageserver_smgr_query_started_count",
1883 : "Number of smgr queries started, aggregated by query type and tenant/timeline.",
1884 109 : &["smgr_query_type", "tenant_id", "shard_id", "timeline_id"],
1885 : )
1886 109 : .expect("failed to define a metric")
1887 109 : });
1888 :
1889 : /// Per-timeline smgr histogram buckets should be the same as the compute buckets, such that the
1890 : /// metrics are comparable across compute and Pageserver. See also:
1891 : /// <https://github.com/neondatabase/neon/blob/1a87975d956a8ad17ec8b85da32a137ec4893fcc/pgxn/neon/neon_perf_counters.h#L18-L27>
1892 : /// <https://github.com/neondatabase/flux-fleet/blob/556182a939edda87ff1d85a6b02e5cec901e0e9e/apps/base/compute-metrics/scrape-compute-sql-exporter.yaml#L29-L35>
1893 : static SMGR_QUERY_TIME_PER_TENANT_TIMELINE_BUCKETS: &[f64] =
1894 : &[0.0006, 0.001, 0.003, 0.006, 0.01, 0.03, 0.1, 1.0, 3.0];
1895 :
1896 109 : static SMGR_QUERY_TIME_PER_TENANT_TIMELINE: Lazy<HistogramVec> = Lazy::new(|| {
1897 109 : register_histogram_vec!(
1898 : "pageserver_smgr_query_seconds",
1899 : "Time spent _executing_ smgr query handling, excluding batch and throttle delays.",
1900 109 : &["smgr_query_type", "tenant_id", "shard_id", "timeline_id"],
1901 109 : SMGR_QUERY_TIME_PER_TENANT_TIMELINE_BUCKETS.into(),
1902 : )
1903 109 : .expect("failed to define a metric")
1904 109 : });
1905 :
1906 109 : static SMGR_QUERY_TIME_GLOBAL_BUCKETS: Lazy<Vec<f64>> = Lazy::new(|| {
1907 109 : [
1908 109 : 1,
1909 109 : 10,
1910 109 : 20,
1911 109 : 40,
1912 109 : 60,
1913 109 : 80,
1914 109 : 100,
1915 109 : 200,
1916 109 : 300,
1917 109 : 400,
1918 109 : 500,
1919 109 : 600,
1920 109 : 700,
1921 109 : 800,
1922 109 : 900,
1923 109 : 1_000, // 1ms
1924 109 : 2_000,
1925 109 : 4_000,
1926 109 : 6_000,
1927 109 : 8_000,
1928 109 : 10_000, // 10ms
1929 109 : 20_000,
1930 109 : 40_000,
1931 109 : 60_000,
1932 109 : 80_000,
1933 109 : 100_000,
1934 109 : 200_000,
1935 109 : 400_000,
1936 109 : 600_000,
1937 109 : 800_000,
1938 109 : 1_000_000, // 1s
1939 109 : 2_000_000,
1940 109 : 4_000_000,
1941 109 : 6_000_000,
1942 109 : 8_000_000,
1943 109 : 10_000_000, // 10s
1944 109 : 20_000_000,
1945 109 : 50_000_000,
1946 109 : 100_000_000,
1947 109 : 200_000_000,
1948 109 : 1_000_000_000, // 1000s
1949 109 : ]
1950 109 : .into_iter()
1951 109 : .map(Duration::from_micros)
1952 4469 : .map(|d| d.as_secs_f64())
1953 109 : .collect()
1954 109 : });
1955 :
1956 109 : static SMGR_QUERY_TIME_GLOBAL: Lazy<HistogramVec> = Lazy::new(|| {
1957 109 : register_histogram_vec!(
1958 : "pageserver_smgr_query_seconds_global",
1959 : "Like pageserver_smgr_query_seconds, but aggregated to instance level.",
1960 109 : &["smgr_query_type"],
1961 109 : SMGR_QUERY_TIME_GLOBAL_BUCKETS.clone(),
1962 : )
1963 109 : .expect("failed to define a metric")
1964 109 : });
1965 :
1966 109 : static PAGE_SERVICE_BATCH_SIZE_BUCKETS_GLOBAL: Lazy<Vec<f64>> = Lazy::new(|| {
1967 109 : (1..=u32::try_from(DEFAULT_MAX_GET_VECTORED_KEYS).unwrap())
1968 3488 : .map(|v| v.into())
1969 109 : .collect()
1970 109 : });
1971 :
1972 109 : static PAGE_SERVICE_BATCH_SIZE_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
1973 109 : register_histogram!(
1974 : "pageserver_page_service_batch_size_global",
1975 : "Batch size of pageserver page service requests",
1976 109 : PAGE_SERVICE_BATCH_SIZE_BUCKETS_GLOBAL.clone(),
1977 : )
1978 109 : .expect("failed to define a metric")
1979 109 : });
1980 :
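     : // Power-of-two buckets up to the max batch size. Assuming the current
     : // DEFAULT_MAX_GET_VECTORED_KEYS of 32, this yields [1, 2, 4, 8, 16, 32].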
1981 109 : static PAGE_SERVICE_BATCH_SIZE_BUCKETS_PER_TIMELINE: Lazy<Vec<f64>> = Lazy::new(|| {
1982 109 : let mut buckets = Vec::new();
1983 763 : for i in 0.. {
1984 763 : let bucket = 1 << i;
1985 763 : if bucket > u32::try_from(DEFAULT_MAX_GET_VECTORED_KEYS).unwrap() {
1986 109 : break;
1987 654 : }
1988 654 : buckets.push(bucket.into());
1989 : }
1990 109 : buckets
1991 109 : });
1992 :
1993 109 : static PAGE_SERVICE_BATCH_SIZE_PER_TENANT_TIMELINE: Lazy<HistogramVec> = Lazy::new(|| {
1994 109 : register_histogram_vec!(
1995 : "pageserver_page_service_batch_size",
1996 : "Batch size of pageserver page service requests",
1997 109 : &["tenant_id", "shard_id", "timeline_id"],
1998 109 : PAGE_SERVICE_BATCH_SIZE_BUCKETS_PER_TIMELINE.clone()
1999 : )
2000 109 : .expect("failed to define a metric")
2001 109 : });
2002 :
2003 109 : static PAGE_SERVICE_BATCH_BREAK_REASON_GLOBAL: Lazy<IntCounterVec> = Lazy::new(|| {
2004 109 : register_int_counter_vec!(
2005 : // it's a counter, but, name is prepared to extend it to a histogram of queue depth
2006 : "pageserver_page_service_batch_break_reason_global",
2007 : "Reason for breaking batches of get page requests",
2008 109 : &["reason"],
2009 : )
2010 109 : .expect("failed to define a metric")
2011 109 : });
2012 :
2013 : struct GetPageBatchBreakReasonTimelineMetrics {
2014 : map: EnumMap<GetPageBatchBreakReason, IntCounter>,
2015 : }
2016 :
2017 : impl GetPageBatchBreakReasonTimelineMetrics {
2018 235 : fn new(tenant_id: &str, shard_slug: &str, timeline_id: &str) -> Self {
2019 : GetPageBatchBreakReasonTimelineMetrics {
2020 1645 : map: EnumMap::from_array(std::array::from_fn(|reason_idx| {
2021 1645 : let reason = GetPageBatchBreakReason::from_usize(reason_idx);
2022 1645 : PAGE_SERVICE_BATCH_BREAK_REASON_PER_TENANT_TIMELINE.with_label_values(&[
2023 1645 : tenant_id,
2024 1645 : shard_slug,
2025 1645 : timeline_id,
2026 1645 : reason.into(),
2027 1645 : ])
2028 1645 : })),
2029 : }
2030 235 : }
2031 :
2032 0 : fn inc(&self, reason: GetPageBatchBreakReason) {
2033 0 : self.map[reason].inc()
2034 0 : }
2035 : }
2036 :
2037 109 : static PAGE_SERVICE_BATCH_BREAK_REASON_PER_TENANT_TIMELINE: Lazy<IntCounterVec> = Lazy::new(|| {
2038 109 : register_int_counter_vec!(
2039 : "pageserver_page_service_batch_break_reason",
2040 : "Reason for breaking batches of get page requests",
2041 109 : &["tenant_id", "shard_id", "timeline_id", "reason"],
2042 : )
2043 109 : .expect("failed to define a metric")
2044 109 : });
2045 :
2046 0 : pub(crate) static PAGE_SERVICE_CONFIG_MAX_BATCH_SIZE: Lazy<IntGaugeVec> = Lazy::new(|| {
2047 0 : register_int_gauge_vec!(
2048 : "pageserver_page_service_config_max_batch_size",
2049 : "Configured maximum batch size for the server-side batching functionality of page_service. \
2050 : Labels expose more of the configuration parameters.",
2051 0 : &["mode", "execution", "batching"]
2052 : )
2053 0 : .expect("failed to define a metric")
2054 0 : });
2055 :
2056 0 : fn set_page_service_config_max_batch_size(conf: &PageServicePipeliningConfig) {
2057 0 : PAGE_SERVICE_CONFIG_MAX_BATCH_SIZE.reset();
2058 0 : let (label_values, value) = match conf {
2059 0 : PageServicePipeliningConfig::Serial => (["serial", "-", "-"], 1),
2060 : PageServicePipeliningConfig::Pipelined(PageServicePipeliningConfigPipelined {
2061 0 : max_batch_size,
2062 0 : execution,
2063 0 : batching,
2064 : }) => {
2065 0 : let mode = "pipelined";
2066 0 : let execution = match execution {
2067 : PageServiceProtocolPipelinedExecutionStrategy::ConcurrentFutures => {
2068 0 : "concurrent-futures"
2069 : }
2070 0 : PageServiceProtocolPipelinedExecutionStrategy::Tasks => "tasks",
2071 : };
2072 0 : let batching = match batching {
2073 0 : PageServiceProtocolPipelinedBatchingStrategy::UniformLsn => "uniform-lsn",
2074 0 : PageServiceProtocolPipelinedBatchingStrategy::ScatteredLsn => "scattered-lsn",
2075 : };
2076 :
2077 0 : ([mode, execution, batching], max_batch_size.get())
2078 : }
2079 : };
2080 0 : PAGE_SERVICE_CONFIG_MAX_BATCH_SIZE
2081 0 : .with_label_values(&label_values)
2082 0 : .set(value.try_into().unwrap());
2083 0 : }
2084 :
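     : // Worked example (derived from the match above): a pipelined config with
     : // max_batch_size = 32, ConcurrentFutures execution and UniformLsn batching
     : // is exported as
     : //
     : //     pageserver_page_service_config_max_batch_size{
     : //         mode="pipelined", execution="concurrent-futures", batching="uniform-lsn"
     : //     } = 32
     :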
2085 109 : static PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS: Lazy<IntCounterVec> = Lazy::new(|| {
2086 109 : register_int_counter_vec!(
2087 : "pageserver_page_service_pagestream_flush_in_progress_micros",
2088 : "Counter that sums up the microseconds that a pagestream response was being flushed into the TCP connection. \
2089 : If the flush is particularly slow, this counter will be updated periodically to make slow flushes \
2090 : easily discoverable in monitoring. \
2091 : Hence, this is NOT a completion latency historgram.",
2092 109 : &["tenant_id", "shard_id", "timeline_id"],
2093 : )
2094 109 : .expect("failed to define a metric")
2095 109 : });
2096 :
2097 109 : static PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS_GLOBAL: Lazy<IntCounter> = Lazy::new(|| {
2098 109 : register_int_counter!(
2099 : "pageserver_page_service_pagestream_flush_in_progress_micros_global",
2100 : "Like pageserver_page_service_pagestream_flush_in_progress_seconds, but instance-wide.",
2101 : )
2102 109 : .expect("failed to define a metric")
2103 109 : });
2104 :
2105 109 : static PAGE_SERVICE_SMGR_BATCH_WAIT_TIME: Lazy<HistogramVec> = Lazy::new(|| {
2106 109 : register_histogram_vec!(
2107 : "pageserver_page_service_pagestream_batch_wait_time_seconds",
2108 : "Time a request spent waiting in its batch until the batch moved to throttle&execution.",
2109 109 : &["tenant_id", "shard_id", "timeline_id"],
2110 109 : SMGR_QUERY_TIME_PER_TENANT_TIMELINE_BUCKETS.into(),
2111 : )
2112 109 : .expect("failed to define a metric")
2113 109 : });
2114 :
2115 109 : static PAGE_SERVICE_SMGR_BATCH_WAIT_TIME_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
2116 109 : register_histogram!(
2117 : "pageserver_page_service_pagestream_batch_wait_time_seconds_global",
2118 : "Like pageserver_page_service_pagestream_batch_wait_time_seconds, but aggregated to instance level.",
2119 109 : SMGR_QUERY_TIME_GLOBAL_BUCKETS.to_vec(),
2120 : )
2121 109 : .expect("failed to define a metric")
2122 109 : });
2123 :
2124 : impl SmgrQueryTimePerTimeline {
2125 235 : pub(crate) fn new(
2126 235 : tenant_shard_id: &TenantShardId,
2127 235 : timeline_id: &TimelineId,
2128 235 : pagestream_throttle_metrics: Arc<tenant_throttling::Pagestream>,
2129 235 : ) -> Self {
2130 235 : let tenant_id = tenant_shard_id.tenant_id.to_string();
2131 235 : let shard_slug = format!("{}", tenant_shard_id.shard_slug());
2132 235 : let timeline_id = timeline_id.to_string();
2133 1410 : let global_started = std::array::from_fn(|i| {
2134 1410 : let op = SmgrQueryType::from_repr(i).unwrap();
2135 1410 : SMGR_QUERY_STARTED_GLOBAL
2136 1410 : .get_metric_with_label_values(&[op.into()])
2137 1410 : .unwrap()
2138 1410 : });
2139 1410 : let global_latency = std::array::from_fn(|i| {
2140 1410 : let op = SmgrQueryType::from_repr(i).unwrap();
2141 1410 : SMGR_QUERY_TIME_GLOBAL
2142 1410 : .get_metric_with_label_values(&[op.into()])
2143 1410 : .unwrap()
2144 1410 : });
2145 :
2146 235 : let per_timeline_getpage_started = SMGR_QUERY_STARTED_PER_TENANT_TIMELINE
2147 235 : .get_metric_with_label_values(&[
2148 235 : SmgrQueryType::GetPageAtLsn.into(),
2149 235 : &tenant_id,
2150 235 : &shard_slug,
2151 235 : &timeline_id,
2152 235 : ])
2153 235 : .unwrap();
2154 235 : let per_timeline_getpage_latency = SMGR_QUERY_TIME_PER_TENANT_TIMELINE
2155 235 : .get_metric_with_label_values(&[
2156 235 : SmgrQueryType::GetPageAtLsn.into(),
2157 235 : &tenant_id,
2158 235 : &shard_slug,
2159 235 : &timeline_id,
2160 235 : ])
2161 235 : .unwrap();
2162 :
2163 235 : let global_batch_size = PAGE_SERVICE_BATCH_SIZE_GLOBAL.clone();
2164 235 : let per_timeline_batch_size = PAGE_SERVICE_BATCH_SIZE_PER_TENANT_TIMELINE
2165 235 : .get_metric_with_label_values(&[&tenant_id, &shard_slug, &timeline_id])
2166 235 : .unwrap();
2167 :
2168 235 : let global_batch_wait_time = PAGE_SERVICE_SMGR_BATCH_WAIT_TIME_GLOBAL.clone();
2169 235 : let per_timeline_batch_wait_time = PAGE_SERVICE_SMGR_BATCH_WAIT_TIME
2170 235 : .get_metric_with_label_values(&[&tenant_id, &shard_slug, &timeline_id])
2171 235 : .unwrap();
2172 :
2173 1645 : let global_batch_break_reason = std::array::from_fn(|i| {
2174 1645 : let reason = GetPageBatchBreakReason::from_usize(i);
2175 1645 : PAGE_SERVICE_BATCH_BREAK_REASON_GLOBAL
2176 1645 : .get_metric_with_label_values(&[reason.into()])
2177 1645 : .unwrap()
2178 1645 : });
2179 235 : let per_timeline_batch_break_reason =
2180 235 : GetPageBatchBreakReasonTimelineMetrics::new(&tenant_id, &shard_slug, &timeline_id);
2181 :
2182 235 : let global_flush_in_progress_micros =
2183 235 : PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS_GLOBAL.clone();
2184 235 : let per_timeline_flush_in_progress_micros = PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS
2185 235 : .get_metric_with_label_values(&[&tenant_id, &shard_slug, &timeline_id])
2186 235 : .unwrap();
2187 :
2188 235 : Self {
2189 235 : global_started,
2190 235 : global_latency,
2191 235 : per_timeline_getpage_latency,
2192 235 : per_timeline_getpage_started,
2193 235 : global_batch_size,
2194 235 : per_timeline_batch_size,
2195 235 : global_flush_in_progress_micros,
2196 235 : per_timeline_flush_in_progress_micros,
2197 235 : global_batch_wait_time,
2198 235 : per_timeline_batch_wait_time,
2199 235 : global_batch_break_reason,
2200 235 : per_timeline_batch_break_reason,
2201 235 : throttling: pagestream_throttle_metrics,
2202 235 : }
2203 235 : }
2204 0 : pub(crate) fn start_smgr_op(&self, op: SmgrQueryType, received_at: Instant) -> SmgrOpTimer {
2205 0 : self.global_started[op as usize].inc();
2206 :
2207 0 : let per_timeline_latency_histo = if matches!(op, SmgrQueryType::GetPageAtLsn) {
2208 0 : self.per_timeline_getpage_started.inc();
2209 0 : Some(self.per_timeline_getpage_latency.clone())
2210 : } else {
2211 0 : None
2212 : };
2213 :
2214 0 : SmgrOpTimer(Some(SmgrOpTimerInner {
2215 0 : global_execution_latency_histo: self.global_latency[op as usize].clone(),
2216 0 : per_timeline_execution_latency_histo: per_timeline_latency_histo,
2217 0 : global_flush_in_progress_micros: self.global_flush_in_progress_micros.clone(),
2218 0 : per_timeline_flush_in_progress_micros: self
2219 0 : .per_timeline_flush_in_progress_micros
2220 0 : .clone(),
2221 0 : global_batch_wait_time: self.global_batch_wait_time.clone(),
2222 0 : per_timeline_batch_wait_time: self.per_timeline_batch_wait_time.clone(),
2223 0 : throttling: self.throttling.clone(),
2224 0 : timings: SmgrOpTimerState::Received { received_at },
2225 0 : }))
2226 0 : }
2227 :
2228 : /// TODO: do something about this? seems odd, we have a similar call on SmgrOpTimer
2229 0 : pub(crate) fn observe_getpage_batch_start(
2230 0 : &self,
2231 0 : batch_size: usize,
2232 0 : break_reason: GetPageBatchBreakReason,
2233 0 : ) {
2234 0 : self.global_batch_size.observe(batch_size as f64);
2235 0 : self.per_timeline_batch_size.observe(batch_size as f64);
2236 :
2237 0 : self.global_batch_break_reason[break_reason.into_usize()].inc();
2238 0 : self.per_timeline_batch_break_reason.inc(break_reason);
2239 0 : }
2240 : }
2241 :
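     : // Batch-side sketch: when a batch is cut, record its size and the reason
     : // it ended (both methods are defined above; `batch` is hypothetical).
     : //
     : //     metrics.observe_getpage_batch_start(batch.len(), GetPageBatchBreakReason::BatchFull);
     :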
2242 : // keep in sync with control plane Go code so that we can validate
2243 : // compute's basebackup_ms metric with our perspective in the context of SLI/SLO.
2244 0 : static COMPUTE_STARTUP_BUCKETS: Lazy<[f64; 28]> = Lazy::new(|| {
2245 : // Go code uses milliseconds. Variable is called `computeStartupBuckets`
2246 0 : [
2247 0 : 5, 10, 20, 30, 50, 70, 100, 120, 150, 200, 250, 300, 350, 400, 450, 500, 600, 800, 1000,
2248 0 : 1500, 2000, 2500, 3000, 5000, 10000, 20000, 40000, 60000,
2249 0 : ]
2250 0 : .map(|ms| (ms as f64) / 1000.0)
2251 0 : });
2252 :
2253 : pub(crate) struct BasebackupQueryTime {
2254 : ok: Histogram,
2255 : error: Histogram,
2256 : client_error: Histogram,
2257 : }
2258 :
2259 0 : pub(crate) static BASEBACKUP_QUERY_TIME: Lazy<BasebackupQueryTime> = Lazy::new(|| {
2260 0 : let vec = register_histogram_vec!(
2261 : "pageserver_basebackup_query_seconds",
2262 : "Histogram of basebackup queries durations, by result type",
2263 0 : &["result"],
2264 0 : COMPUTE_STARTUP_BUCKETS.to_vec(),
2265 : )
2266 0 : .expect("failed to define a metric");
2267 0 : BasebackupQueryTime {
2268 0 : ok: vec.get_metric_with_label_values(&["ok"]).unwrap(),
2269 0 : error: vec.get_metric_with_label_values(&["error"]).unwrap(),
2270 0 : client_error: vec.get_metric_with_label_values(&["client_error"]).unwrap(),
2271 0 : }
2272 0 : });
2273 :
2274 : pub(crate) struct BasebackupQueryTimeOngoingRecording<'a> {
2275 : parent: &'a BasebackupQueryTime,
2276 : start: std::time::Instant,
2277 : }
2278 :
2279 : impl BasebackupQueryTime {
2280 0 : pub(crate) fn start_recording(&self) -> BasebackupQueryTimeOngoingRecording<'_> {
2281 0 : let start = Instant::now();
2282 0 : BasebackupQueryTimeOngoingRecording {
2283 0 : parent: self,
2284 0 : start,
2285 0 : }
2286 0 : }
2287 : }
2288 :
2289 : impl BasebackupQueryTimeOngoingRecording<'_> {
2290 0 : pub(crate) fn observe<T>(self, res: &Result<T, QueryError>) {
2291 0 : let elapsed = self.start.elapsed().as_secs_f64();
2292 : // If you want to change categorize of a specific error, also change it in `log_query_error`.
2293 0 : let metric = match res {
2294 0 : Ok(_) => &self.parent.ok,
2295 : Err(QueryError::Shutdown) | Err(QueryError::Reconnect) => {
2296 : // Do not observe ok/err for shutdown/reconnect.
2297 : // Reconnect error might be raised when the operation is waiting for LSN and the tenant shutdown interrupts
2298 : // the operation. A reconnect error will be issued and the client will retry.
2299 0 : return;
2300 : }
2301 0 : Err(QueryError::Disconnected(ConnectionError::Io(io_error)))
2302 0 : if is_expected_io_error(io_error) =>
2303 : {
2304 0 : &self.parent.client_error
2305 : }
2306 0 : Err(_) => &self.parent.error,
2307 : };
2308 0 : metric.observe(elapsed);
2309 0 : }
2310 : }
2311 :
2312 0 : pub(crate) static LIVE_CONNECTIONS: Lazy<IntCounterPairVec> = Lazy::new(|| {
2313 0 : register_int_counter_pair_vec!(
2314 : "pageserver_live_connections_started",
2315 : "Number of network connections that we started handling",
2316 : "pageserver_live_connections_finished",
2317 : "Number of network connections that we finished handling",
2318 0 : &["pageserver_connection_kind"]
2319 : )
2320 0 : .expect("failed to define a metric")
2321 0 : });
2322 :
2323 : #[derive(Clone, Copy, enum_map::Enum, IntoStaticStr)]
2324 : pub(crate) enum ComputeCommandKind {
2325 : PageStreamV3,
2326 : PageStreamV2,
2327 : Basebackup,
2328 : Fullbackup,
2329 : LeaseLsn,
2330 : }
2331 :
2332 : pub(crate) struct ComputeCommandCounters {
2333 : map: EnumMap<ComputeCommandKind, IntCounter>,
2334 : }
2335 :
2336 0 : pub(crate) static COMPUTE_COMMANDS_COUNTERS: Lazy<ComputeCommandCounters> = Lazy::new(|| {
2337 0 : let inner = register_int_counter_vec!(
2338 : "pageserver_compute_commands",
2339 : "Number of compute -> pageserver commands processed",
2340 0 : &["command"]
2341 : )
2342 0 : .expect("failed to define a metric");
2343 :
2344 : ComputeCommandCounters {
2345 0 : map: EnumMap::from_array(std::array::from_fn(|i| {
2346 0 : let command = ComputeCommandKind::from_usize(i);
2347 0 : let command_str: &'static str = command.into();
2348 0 : inner.with_label_values(&[command_str])
2349 0 : })),
2350 : }
2351 0 : });
2352 :
2353 : impl ComputeCommandCounters {
2354 0 : pub(crate) fn for_command(&self, command: ComputeCommandKind) -> &IntCounter {
2355 0 : &self.map[command]
2356 0 : }
2357 : }
2358 :
2359 : // remote storage metrics
2360 :
2361 107 : static REMOTE_TIMELINE_CLIENT_CALLS: Lazy<IntCounterPairVec> = Lazy::new(|| {
2362 107 : register_int_counter_pair_vec!(
2363 : "pageserver_remote_timeline_client_calls_started",
2364 : "Number of started calls to remote timeline client.",
2365 : "pageserver_remote_timeline_client_calls_finished",
2366 : "Number of finshed calls to remote timeline client.",
2367 107 : &[
2368 107 : "tenant_id",
2369 107 : "shard_id",
2370 107 : "timeline_id",
2371 107 : "file_kind",
2372 107 : "op_kind"
2373 107 : ],
2374 : )
2375 107 : .unwrap()
2376 107 : });
2377 :
2378 : static REMOTE_TIMELINE_CLIENT_BYTES_STARTED_COUNTER: Lazy<IntCounterVec> =
2379 106 : Lazy::new(|| {
2380 106 : register_int_counter_vec!(
2381 : "pageserver_remote_timeline_client_bytes_started",
2382 : "Incremented by the number of bytes associated with a remote timeline client operation. \
2383 : The increment happens when the operation is scheduled.",
2384 106 : &["tenant_id", "shard_id", "timeline_id", "file_kind", "op_kind"],
2385 : )
2386 106 : .expect("failed to define a metric")
2387 106 : });
2388 :
2389 106 : static REMOTE_TIMELINE_CLIENT_BYTES_FINISHED_COUNTER: Lazy<IntCounterVec> = Lazy::new(|| {
2390 106 : register_int_counter_vec!(
2391 : "pageserver_remote_timeline_client_bytes_finished",
2392 : "Incremented by the number of bytes associated with a remote timeline client operation. \
2393 : The increment happens when the operation finishes (regardless of success/failure/shutdown).",
2394 106 : &["tenant_id", "shard_id", "timeline_id", "file_kind", "op_kind"],
2395 : )
2396 106 : .expect("failed to define a metric")
2397 106 : });
2398 :
2399 : pub(crate) struct TenantManagerMetrics {
2400 : tenant_slots_attached: UIntGauge,
2401 : tenant_slots_secondary: UIntGauge,
2402 : tenant_slots_inprogress: UIntGauge,
2403 : pub(crate) tenant_slot_writes: IntCounter,
2404 : pub(crate) unexpected_errors: IntCounter,
2405 : }
2406 :
2407 : impl TenantManagerMetrics {
2408 : /// Helpers for tracking slots. Note that these do not track the lifetime of TenantSlot objects
2409 : /// exactly: they track the lifetime of the slots _in the tenant map_.
2410 1 : pub(crate) fn slot_inserted(&self, slot: &TenantSlot) {
2411 1 : match slot {
2412 0 : TenantSlot::Attached(_) => {
2413 0 : self.tenant_slots_attached.inc();
2414 0 : }
2415 0 : TenantSlot::Secondary(_) => {
2416 0 : self.tenant_slots_secondary.inc();
2417 0 : }
2418 1 : TenantSlot::InProgress(_) => {
2419 1 : self.tenant_slots_inprogress.inc();
2420 1 : }
2421 : }
2422 1 : }
2423 :
2424 1 : pub(crate) fn slot_removed(&self, slot: &TenantSlot) {
2425 1 : match slot {
2426 1 : TenantSlot::Attached(_) => {
2427 1 : self.tenant_slots_attached.dec();
2428 1 : }
2429 0 : TenantSlot::Secondary(_) => {
2430 0 : self.tenant_slots_secondary.dec();
2431 0 : }
2432 0 : TenantSlot::InProgress(_) => {
2433 0 : self.tenant_slots_inprogress.dec();
2434 0 : }
2435 : }
2436 1 : }
2437 :
2438 : #[cfg(all(debug_assertions, not(test)))]
2439 0 : pub(crate) fn slots_total(&self) -> u64 {
2440 0 : self.tenant_slots_attached.get()
2441 0 : + self.tenant_slots_secondary.get()
2442 0 : + self.tenant_slots_inprogress.get()
2443 0 : }
2444 : }
2445 :
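     : // Call sites must keep insertions and removals balanced per slot so that
     : // the three gauges always sum to the map size; a sketch:
     : //
     : //     TENANT_MANAGER.slot_inserted(&slot);
     : //     // ... the slot lives in the tenant map ...
     : //     TENANT_MANAGER.slot_removed(&slot);
     :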
2446 1 : pub(crate) static TENANT_MANAGER: Lazy<TenantManagerMetrics> = Lazy::new(|| {
2447 1 : let tenant_slots = register_uint_gauge_vec!(
2448 : "pageserver_tenant_manager_slots",
2449 : "How many slots currently exist, including all attached, secondary and in-progress operations",
2450 1 : &["mode"]
2451 : )
2452 1 : .expect("failed to define a metric");
2453 1 : TenantManagerMetrics {
2454 1 : tenant_slots_attached: tenant_slots
2455 1 : .get_metric_with_label_values(&["attached"])
2456 1 : .unwrap(),
2457 1 : tenant_slots_secondary: tenant_slots
2458 1 : .get_metric_with_label_values(&["secondary"])
2459 1 : .unwrap(),
2460 1 : tenant_slots_inprogress: tenant_slots
2461 1 : .get_metric_with_label_values(&["inprogress"])
2462 1 : .unwrap(),
2463 1 : tenant_slot_writes: register_int_counter!(
2464 1 : "pageserver_tenant_manager_slot_writes",
2465 1 : "Writes to a tenant slot, including all of create/attach/detach/delete"
2466 1 : )
2467 1 : .expect("failed to define a metric"),
2468 1 : unexpected_errors: register_int_counter!(
2469 1 : "pageserver_tenant_manager_unexpected_errors_total",
2470 1 : "Number of unexpected conditions encountered: nonzero value indicates a non-fatal bug."
2471 1 : )
2472 1 : .expect("failed to define a metric"),
2473 1 : }
2474 1 : });
2475 :
2476 : pub(crate) struct DeletionQueueMetrics {
2477 : pub(crate) keys_submitted: IntCounter,
2478 : pub(crate) keys_dropped: IntCounter,
2479 : pub(crate) keys_executed: IntCounter,
2480 : pub(crate) keys_validated: IntCounter,
2481 : pub(crate) dropped_lsn_updates: IntCounter,
2482 : pub(crate) unexpected_errors: IntCounter,
2483 : pub(crate) remote_errors: IntCounterVec,
2484 : }
2485 19 : pub(crate) static DELETION_QUEUE: Lazy<DeletionQueueMetrics> = Lazy::new(|| {
2486 19 : DeletionQueueMetrics{
2487 19 :
2488 19 : keys_submitted: register_int_counter!(
2489 19 : "pageserver_deletion_queue_submitted_total",
2490 19 : "Number of objects submitted for deletion"
2491 19 : )
2492 19 : .expect("failed to define a metric"),
2493 19 :
2494 19 : keys_dropped: register_int_counter!(
2495 19 : "pageserver_deletion_queue_dropped_total",
2496 19 : "Number of object deletions dropped due to stale generation."
2497 19 : )
2498 19 : .expect("failed to define a metric"),
2499 19 :
2500 19 : keys_executed: register_int_counter!(
2501 19 : "pageserver_deletion_queue_executed_total",
2502 19 : "Number of objects deleted. Only includes objects that we actually deleted, sum with pageserver_deletion_queue_dropped_total for the total number of keys processed to completion"
2503 19 : )
2504 19 : .expect("failed to define a metric"),
2505 19 :
2506 19 : keys_validated: register_int_counter!(
2507 19 : "pageserver_deletion_queue_validated_total",
2508 19 : "Number of keys validated for deletion. Sum with pageserver_deletion_queue_dropped_total for the total number of keys that have passed through the validation stage."
2509 19 : )
2510 19 : .expect("failed to define a metric"),
2511 19 :
2512 19 : dropped_lsn_updates: register_int_counter!(
2513 19 : "pageserver_deletion_queue_dropped_lsn_updates_total",
2514 19 : "Updates to remote_consistent_lsn dropped due to stale generation number."
2515 19 : )
2516 19 : .expect("failed to define a metric"),
2517 19 : unexpected_errors: register_int_counter!(
2518 19 : "pageserver_deletion_queue_unexpected_errors_total",
2519 19 : "Number of unexpected condiions that may stall the queue: any value above zero is unexpected."
2520 19 : )
2521 19 : .expect("failed to define a metric"),
2522 19 : remote_errors: register_int_counter_vec!(
2523 19 : "pageserver_deletion_queue_remote_errors_total",
2524 19 : "Retryable remote I/O errors while executing deletions, for example 503 responses to DeleteObjects",
2525 19 : &["op_kind"],
2526 19 : )
2527 19 : .expect("failed to define a metric")
2528 19 : }
2529 19 : });
2530 :
2531 : pub(crate) struct SecondaryModeMetrics {
2532 : pub(crate) upload_heatmap: IntCounter,
2533 : pub(crate) upload_heatmap_errors: IntCounter,
2534 : pub(crate) upload_heatmap_duration: Histogram,
2535 : pub(crate) download_heatmap: IntCounter,
2536 : pub(crate) download_layer: IntCounter,
2537 : }
2538 0 : pub(crate) static SECONDARY_MODE: Lazy<SecondaryModeMetrics> = Lazy::new(|| {
2539 0 : SecondaryModeMetrics {
2540 0 : upload_heatmap: register_int_counter!(
2541 0 : "pageserver_secondary_upload_heatmap",
2542 0 : "Number of heatmaps written to remote storage by attached tenants"
2543 0 : )
2544 0 : .expect("failed to define a metric"),
2545 0 : upload_heatmap_errors: register_int_counter!(
2546 0 : "pageserver_secondary_upload_heatmap_errors",
2547 0 : "Failures writing heatmap to remote storage"
2548 0 : )
2549 0 : .expect("failed to define a metric"),
2550 0 : upload_heatmap_duration: register_histogram!(
2551 0 : "pageserver_secondary_upload_heatmap_duration",
2552 0 : "Time to build and upload a heatmap, including any waiting inside the remote storage client"
2553 0 : )
2554 0 : .expect("failed to define a metric"),
2555 0 : download_heatmap: register_int_counter!(
2556 0 : "pageserver_secondary_download_heatmap",
2557 0 : "Number of downloads of heatmaps by secondary mode locations, including when it hasn't changed"
2558 0 : )
2559 0 : .expect("failed to define a metric"),
2560 0 : download_layer: register_int_counter!(
2561 0 : "pageserver_secondary_download_layer",
2562 0 : "Number of downloads of layers by secondary mode locations"
2563 0 : )
2564 0 : .expect("failed to define a metric"),
2565 0 : }
2566 0 : });
2567 :
2568 0 : pub(crate) static SECONDARY_RESIDENT_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
2569 0 : register_uint_gauge_vec!(
2570 : "pageserver_secondary_resident_physical_size",
2571 : "The size of the layer files present in the pageserver's filesystem, for secondary locations.",
2572 0 : &["tenant_id", "shard_id"]
2573 : )
2574 0 : .expect("failed to define a metric")
2575 0 : });
2576 :
2577 0 : pub(crate) static NODE_UTILIZATION_SCORE: Lazy<UIntGauge> = Lazy::new(|| {
2578 0 : register_uint_gauge!(
2579 : "pageserver_utilization_score",
2580 : "The utilization score we report to the storage controller for scheduling, where 0 is empty, 1000000 is full, and anything above is considered overloaded",
2581 : )
2582 0 : .expect("failed to define a metric")
2583 0 : });
2584 :
2585 0 : pub(crate) static SECONDARY_HEATMAP_TOTAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
2586 0 : register_uint_gauge_vec!(
2587 : "pageserver_secondary_heatmap_total_size",
2588 : "The total size in bytes of all layers in the most recently downloaded heatmap.",
2589 0 : &["tenant_id", "shard_id"]
2590 : )
2591 0 : .expect("failed to define a metric")
2592 0 : });
2593 :
2594 : #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
2595 : pub enum RemoteOpKind {
2596 : Upload,
2597 : Download,
2598 : Delete,
2599 : }
2600 : impl RemoteOpKind {
2601 8174 : pub fn as_str(&self) -> &'static str {
2602 8174 : match self {
2603 7672 : Self::Upload => "upload",
2604 34 : Self::Download => "download",
2605 468 : Self::Delete => "delete",
2606 : }
2607 8174 : }
2608 : }
2609 :
2610 : #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
2611 : pub enum RemoteOpFileKind {
2612 : Layer,
2613 : Index,
2614 : }
2615 : impl RemoteOpFileKind {
2616 8174 : pub fn as_str(&self) -> &'static str {
2617 8174 : match self {
2618 5813 : Self::Layer => "layer",
2619 2361 : Self::Index => "index",
2620 : }
2621 8174 : }
2622 : }
2623 :
2624 105 : pub(crate) static REMOTE_TIMELINE_CLIENT_COMPLETION_LATENCY: Lazy<HistogramVec> = Lazy::new(|| {
2625 105 : register_histogram_vec!(
2626 : "pageserver_remote_timeline_client_seconds_global",
2627 : "Time spent on remote timeline client operations. \
2628 : Grouped by task_kind, file_kind, operation_kind and status. \
2629 : The task_kind is \
2630 : - for layer downloads, populated from RequestContext (primary objective of having the label) \
2631 : - for index downloads, set to 'unknown' \
2632 : - for any upload operation, set to 'RemoteUploadTask' \
2633 : This keeps dimensionality at bay. \
2634 : Does not account for time spent waiting in remote timeline client's queues.",
2635 105 : &["task_kind", "file_kind", "op_kind", "status"]
2636 : )
2637 105 : .expect("failed to define a metric")
2638 105 : });
2639 :
2640 0 : pub(crate) static TENANT_TASK_EVENTS: Lazy<IntCounterVec> = Lazy::new(|| {
2641 0 : register_int_counter_vec!(
2642 : "pageserver_tenant_task_events",
2643 : "Number of task start/stop/fail events.",
2644 0 : &["event"],
2645 : )
2646 0 : .expect("Failed to register tenant_task_events metric")
2647 0 : });
2648 :
2649 : pub struct BackgroundLoopSemaphoreMetrics {
2650 : counters: EnumMap<BackgroundLoopKind, IntCounterPair>,
2651 : durations: EnumMap<BackgroundLoopKind, Histogram>,
2652 : waiting_tasks: EnumMap<BackgroundLoopKind, IntGauge>,
2653 : running_tasks: EnumMap<BackgroundLoopKind, IntGauge>,
2654 : }
2655 :
2656 : pub(crate) static BACKGROUND_LOOP_SEMAPHORE: Lazy<BackgroundLoopSemaphoreMetrics> =
2657 10 : Lazy::new(|| {
2658 10 : let counters = register_int_counter_pair_vec!(
2659 : "pageserver_background_loop_semaphore_wait_start_count",
2660 : "Counter for background loop concurrency-limiting semaphore acquire calls started",
2661 : "pageserver_background_loop_semaphore_wait_finish_count",
2662 : "Counter for background loop concurrency-limiting semaphore acquire calls finished",
2663 10 : &["task"],
2664 : )
2665 10 : .unwrap();
2666 :
2667 10 : let durations = register_histogram_vec!(
2668 : "pageserver_background_loop_semaphore_wait_seconds",
2669 : "Seconds spent waiting on background loop semaphore acquisition",
2670 10 : &["task"],
2671 10 : vec![0.01, 1.0, 5.0, 10.0, 30.0, 60.0, 180.0, 300.0, 600.0],
2672 : )
2673 10 : .unwrap();
2674 :
2675 10 : let waiting_tasks = register_int_gauge_vec!(
2676 : "pageserver_background_loop_semaphore_waiting_tasks",
2677 : "Number of background loop tasks waiting for semaphore",
2678 10 : &["task"],
2679 : )
2680 10 : .unwrap();
2681 :
2682 10 : let running_tasks = register_int_gauge_vec!(
2683 : "pageserver_background_loop_semaphore_running_tasks",
2684 : "Number of background loop tasks running concurrently",
2685 10 : &["task"],
2686 : )
2687 10 : .unwrap();
2688 :
2689 : BackgroundLoopSemaphoreMetrics {
2690 100 : counters: EnumMap::from_array(std::array::from_fn(|i| {
2691 100 : let kind = BackgroundLoopKind::from_usize(i);
2692 100 : counters.with_label_values(&[kind.into()])
2693 100 : })),
2694 100 : durations: EnumMap::from_array(std::array::from_fn(|i| {
2695 100 : let kind = BackgroundLoopKind::from_usize(i);
2696 100 : durations.with_label_values(&[kind.into()])
2697 100 : })),
2698 100 : waiting_tasks: EnumMap::from_array(std::array::from_fn(|i| {
2699 100 : let kind = BackgroundLoopKind::from_usize(i);
2700 100 : waiting_tasks.with_label_values(&[kind.into()])
2701 100 : })),
2702 100 : running_tasks: EnumMap::from_array(std::array::from_fn(|i| {
2703 100 : let kind = BackgroundLoopKind::from_usize(i);
2704 100 : running_tasks.with_label_values(&[kind.into()])
2705 100 : })),
2706 : }
2707 10 : });
2708 :
2709 : impl BackgroundLoopSemaphoreMetrics {
2710 : /// Starts recording semaphore metrics. Call `acquired()` on the returned recorder when the
2711 : /// semaphore is acquired, and drop it when the task completes or is cancelled.
2712 192 : pub(crate) fn record(
2713 192 : &self,
2714 192 : task: BackgroundLoopKind,
2715 192 : ) -> BackgroundLoopSemaphoreMetricsRecorder {
2716 192 : BackgroundLoopSemaphoreMetricsRecorder::start(self, task)
2717 192 : }
2718 : }
2719 :
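     : // Usage sketch (the semaphore and the task body are hypothetical; the
     : // recorder's Drop impl below handles the cancelled case):
     : //
     : //     let mut recorder = BACKGROUND_LOOP_SEMAPHORE.record(BackgroundLoopKind::Compaction);
     : //     let _permit = semaphore.acquire().await;
     : //     let _waited = recorder.acquired();
     : //     run_one_iteration().await;
     : //     drop(recorder); // decrements running_tasks
     :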
2720 : /// Records metrics for a background task.
2721 : pub struct BackgroundLoopSemaphoreMetricsRecorder<'a> {
2722 : metrics: &'a BackgroundLoopSemaphoreMetrics,
2723 : task: BackgroundLoopKind,
2724 : start: Instant,
2725 : wait_counter_guard: Option<metrics::IntCounterPairGuard>,
2726 : }
2727 :
2728 : impl<'a> BackgroundLoopSemaphoreMetricsRecorder<'a> {
2729 : /// Starts recording semaphore metrics, by recording wait time and incrementing
2730 : /// `wait_start_count` and `waiting_tasks`.
2731 192 : fn start(metrics: &'a BackgroundLoopSemaphoreMetrics, task: BackgroundLoopKind) -> Self {
2732 192 : metrics.waiting_tasks[task].inc();
2733 192 : Self {
2734 192 : metrics,
2735 192 : task,
2736 192 : start: Instant::now(),
2737 192 : wait_counter_guard: Some(metrics.counters[task].guard()),
2738 192 : }
2739 192 : }
2740 :
2741 : /// Signals that the semaphore has been acquired, and updates relevant metrics.
2742 192 : pub fn acquired(&mut self) -> Duration {
2743 192 : let waited = self.start.elapsed();
2744 192 : self.wait_counter_guard.take().expect("already acquired");
2745 192 : self.metrics.durations[self.task].observe(waited.as_secs_f64());
2746 192 : self.metrics.waiting_tasks[self.task].dec();
2747 192 : self.metrics.running_tasks[self.task].inc();
2748 192 : waited
2749 192 : }
2750 : }
2751 :
2752 : impl Drop for BackgroundLoopSemaphoreMetricsRecorder<'_> {
2753 : /// The task either completed or was cancelled.
2754 192 : fn drop(&mut self) {
2755 192 : if self.wait_counter_guard.take().is_some() {
2756 0 : // Waiting.
2757 0 : self.metrics.durations[self.task].observe(self.start.elapsed().as_secs_f64());
2758 0 : self.metrics.waiting_tasks[self.task].dec();
2759 192 : } else {
2760 192 : // Running.
2761 192 : self.metrics.running_tasks[self.task].dec();
2762 192 : }
2763 192 : }
2764 : }
2765 :
2766 0 : pub(crate) static BACKGROUND_LOOP_PERIOD_OVERRUN_COUNT: Lazy<IntCounterVec> = Lazy::new(|| {
2767 0 : register_int_counter_vec!(
2768 : "pageserver_background_loop_period_overrun_count",
2769 : "Incremented whenever warn_when_period_overrun() logs a warning.",
2770 0 : &["task", "period"],
2771 : )
2772 0 : .expect("failed to define a metric")
2773 0 : });
2774 :
2775 : // walreceiver metrics
2776 :
2777 0 : pub(crate) static WALRECEIVER_STARTED_CONNECTIONS: Lazy<IntCounter> = Lazy::new(|| {
2778 0 : register_int_counter!(
2779 : "pageserver_walreceiver_started_connections_total",
2780 : "Number of started walreceiver connections"
2781 : )
2782 0 : .expect("failed to define a metric")
2783 0 : });
2784 :
2785 0 : pub(crate) static WALRECEIVER_ACTIVE_MANAGERS: Lazy<IntGauge> = Lazy::new(|| {
2786 0 : register_int_gauge!(
2787 : "pageserver_walreceiver_active_managers",
2788 : "Number of active walreceiver managers"
2789 : )
2790 0 : .expect("failed to define a metric")
2791 0 : });
2792 :
2793 0 : pub(crate) static WALRECEIVER_SWITCHES: Lazy<IntCounterVec> = Lazy::new(|| {
2794 0 : register_int_counter_vec!(
2795 : "pageserver_walreceiver_switches_total",
2796 : "Number of walreceiver manager change_connection calls",
2797 0 : &["reason"]
2798 : )
2799 0 : .expect("failed to define a metric")
2800 0 : });
2801 :
2802 0 : pub(crate) static WALRECEIVER_BROKER_UPDATES: Lazy<IntCounter> = Lazy::new(|| {
2803 0 : register_int_counter!(
2804 : "pageserver_walreceiver_broker_updates_total",
2805 : "Number of received broker updates in walreceiver"
2806 : )
2807 0 : .expect("failed to define a metric")
2808 0 : });
2809 :
2810 1 : pub(crate) static WALRECEIVER_CANDIDATES_EVENTS: Lazy<IntCounterVec> = Lazy::new(|| {
2811 1 : register_int_counter_vec!(
2812 : "pageserver_walreceiver_candidates_events_total",
2813 : "Number of walreceiver candidate events",
2814 1 : &["event"]
2815 : )
2816 1 : .expect("failed to define a metric")
2817 1 : });
2818 :
2819 : pub(crate) static WALRECEIVER_CANDIDATES_ADDED: Lazy<IntCounter> =
2820 0 : Lazy::new(|| WALRECEIVER_CANDIDATES_EVENTS.with_label_values(&["add"]));
2821 :
2822 : pub(crate) static WALRECEIVER_CANDIDATES_REMOVED: Lazy<IntCounter> =
2823 1 : Lazy::new(|| WALRECEIVER_CANDIDATES_EVENTS.with_label_values(&["remove"]));
2824 :
2825 0 : pub(crate) static LOCAL_DATA_LOSS_SUSPECTED: Lazy<IntGauge> = Lazy::new(|| {
2826 0 : register_int_gauge!(
2827 : "pageserver_local_data_loss_suspected",
2828 : "Non-zero value indicates that pageserver local data loss is suspected (and highly likely)."
2829 : )
2830 0 : .expect("failed to define a metric")
2831 0 : });
2832 :
2833 : // Counter keeping track of misrouted PageStream requests. Spelling out PageStream requests here to distinguish
2834 : // it from other types of requests (SK wal replication, http requests, etc.). PageStream requests are used by
2835 : // Postgres compute to fetch data from pageservers.
2836 : // A misrouted PageStream request is registered if the pageserver cannot find the tenant identified in the
2837 : // request, or if the pageserver is not the "primary" serving the tenant shard. These errors almost always indicate
2838 : // issues with compute configuration, caused by either the compute node itself being stuck in the wrong
2839 : // configuration or Storage Controller reconciliation bugs. Misrouted requests are expected during tenant migration
2840 : // and/or during recovery following a pageserver failure, but persistently high rates of misrouted requests
2841 : // are indicative of bugs (and unavailability).
2842 0 : pub(crate) static MISROUTED_PAGESTREAM_REQUESTS: Lazy<IntCounter> = Lazy::new(|| {
2843 0 : register_int_counter!(
2844 : "pageserver_misrouted_pagestream_requests_total",
2845 : "Number of pageserver pagestream requests that were routed to the wrong pageserver"
2846 : )
2847 0 : .expect("failed to define a metric")
2848 0 : });
2849 :
2850 : // Global counter for PageStream request results by outcome. Outcomes are divided into 3 categories:
2851 : // - success
2852 : // - internal_error: errors that indicate bugs in the storage cluster (e.g. page reconstruction errors, misrouted requests, LSN timeout errors)
2853 : // - other_error: transient error conditions that are expected in normal operation or indicate bugs with other parts of the system (e.g. error due to pageserver shutdown, malformed requests etc.)
2854 0 : pub(crate) static PAGESTREAM_HANDLER_RESULTS_TOTAL: Lazy<IntCounterVec> = Lazy::new(|| {
2855 0 : register_int_counter_vec!(
2856 : "pageserver_pagestream_handler_results_total",
2857 : "Number of pageserver pagestream handler results by outcome (success, internal_error, other_error)",
2858 0 : &["outcome"]
2859 : )
2860 0 : .expect("failed to define a metric")
2861 0 : });
2862 :
2863 : // Constants for pageserver_pagestream_handler_results_total's outcome labels
2864 : pub(crate) const PAGESTREAM_HANDLER_OUTCOME_SUCCESS: &str = "success";
2865 : pub(crate) const PAGESTREAM_HANDLER_OUTCOME_INTERNAL_ERROR: &str = "internal_error";
2866 : pub(crate) const PAGESTREAM_HANDLER_OUTCOME_OTHER_ERROR: &str = "other_error";
2867 :
2868 : // Metrics collected on WAL redo operations
2869 : //
2870 : // We collect the time spent in actual WAL redo ('redo'), and time waiting
2871 : // for access to the postgres process ('wait') since there is only one for
2872 : // each tenant.
2873 :
2874 : /// Time buckets are small because we want to be able to measure the
2875 : /// smallest redo processing times. These buckets allow us to measure down
2876 : /// to 5us, which equates to 200'000 pages/sec, which equates to 1.6GB/sec.
2877 : /// This is much better than the previous 5ms aka 200 pages/sec aka 1.6MB/sec.
2878 : ///
2879 : /// Values up to 1s are recorded because metrics show that we have redo
2880 : /// durations and lock times larger than 0.250s.
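     : /// (Arithmetic behind those figures, assuming 8 KiB pages: 1 / 5 µs =
     : /// 200,000 pages/s, and 200,000 × 8 KiB ≈ 1.6 GB/s.)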
2881 : macro_rules! redo_histogram_time_buckets {
2882 : () => {
2883 : vec![
2884 : 0.000_005, 0.000_010, 0.000_025, 0.000_050, 0.000_100, 0.000_250, 0.000_500, 0.001_000,
2885 : 0.002_500, 0.005_000, 0.010_000, 0.025_000, 0.050_000, 0.100_000, 0.250_000, 0.500_000,
2886 : 1.000_000,
2887 : ]
2888 : };
2889 : }
2890 :
2891 : /// While we're at it, also measure the number of records replayed in each
2892 : /// operation. We have a global 'total replayed' counter, but that's not
2893 : /// as useful as 'what is the skew for how many records we replay in one
2894 : /// operation'.
2895 : macro_rules! redo_histogram_count_buckets {
2896 : () => {
2897 : vec![0.0, 1.0, 2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0]
2898 : };
2899 : }
2900 :
2901 : macro_rules! redo_bytes_histogram_count_buckets {
2902 : () => {
2903 : // powers of (2^.5), from 2^4.5 to 2^15 (22 buckets)
2904 : // rounded up to the next multiple of 8 to capture any MAXALIGNed record of that size, too.
2905 : vec![
2906 : 24.0, 32.0, 48.0, 64.0, 96.0, 128.0, 184.0, 256.0, 368.0, 512.0, 728.0, 1024.0, 1456.0,
2907 : 2048.0, 2904.0, 4096.0, 5800.0, 8192.0, 11592.0, 16384.0, 23176.0, 32768.0,
2908 : ]
2909 : };
2910 : }
2911 :
2912 : pub(crate) struct WalIngestMetrics {
2913 : pub(crate) bytes_received: IntCounter,
2914 : pub(crate) records_received: IntCounter,
2915 : pub(crate) records_observed: IntCounter,
2916 : pub(crate) records_committed: IntCounter,
2917 : pub(crate) values_committed_metadata_images: IntCounter,
2918 : pub(crate) values_committed_metadata_deltas: IntCounter,
2919 : pub(crate) values_committed_data_images: IntCounter,
2920 : pub(crate) values_committed_data_deltas: IntCounter,
2921 : pub(crate) gap_blocks_zeroed_on_rel_extend: IntCounter,
2922 : }
2923 :
2924 : impl WalIngestMetrics {
2925 0 : pub(crate) fn inc_values_committed(&self, stats: &DatadirModificationStats) {
2926 0 : if stats.metadata_images > 0 {
2927 0 : self.values_committed_metadata_images
2928 0 : .inc_by(stats.metadata_images);
2929 0 : }
2930 0 : if stats.metadata_deltas > 0 {
2931 0 : self.values_committed_metadata_deltas
2932 0 : .inc_by(stats.metadata_deltas);
2933 0 : }
2934 0 : if stats.data_images > 0 {
2935 0 : self.values_committed_data_images.inc_by(stats.data_images);
2936 0 : }
2937 0 : if stats.data_deltas > 0 {
2938 0 : self.values_committed_data_deltas.inc_by(stats.data_deltas);
2939 0 : }
2940 0 : }
2941 : }
2942 :
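     : // Ingest-side sketch: after committing a modification, feed its per-class
     : // counts in one call (obtaining the stats is elided here):
     : //
     : //     let stats: DatadirModificationStats = /* from the committed modification */;
     : //     WAL_INGEST.inc_values_committed(&stats);
     :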
2943 5 : pub(crate) static WAL_INGEST: Lazy<WalIngestMetrics> = Lazy::new(|| {
2944 5 : let values_committed = register_int_counter_vec!(
2945 : "pageserver_wal_ingest_values_committed",
2946 : "Number of values committed to pageserver storage from WAL records",
2947 5 : &["class", "kind"],
2948 : )
2949 5 : .expect("failed to define a metric");
2950 :
2951 5 : WalIngestMetrics {
2952 5 : bytes_received: register_int_counter!(
2953 5 : "pageserver_wal_ingest_bytes_received",
2954 5 : "Bytes of WAL ingested from safekeepers",
2955 5 : )
2956 5 : .unwrap(),
2957 5 : records_received: register_int_counter!(
2958 5 : "pageserver_wal_ingest_records_received",
2959 5 : "Number of WAL records received from safekeepers"
2960 5 : )
2961 5 : .expect("failed to define a metric"),
2962 5 : records_observed: register_int_counter!(
2963 5 : "pageserver_wal_ingest_records_observed",
2964 5 : "Number of WAL records observed from safekeepers. These are metadata only records for shard 0."
2965 5 : )
2966 5 : .expect("failed to define a metric"),
2967 5 : records_committed: register_int_counter!(
2968 5 : "pageserver_wal_ingest_records_committed",
2969 5 : "Number of WAL records which resulted in writes to pageserver storage"
2970 5 : )
2971 5 : .expect("failed to define a metric"),
2972 5 : values_committed_metadata_images: values_committed.with_label_values(&["metadata", "image"]),
2973 5 : values_committed_metadata_deltas: values_committed.with_label_values(&["metadata", "delta"]),
2974 5 : values_committed_data_images: values_committed.with_label_values(&["data", "image"]),
2975 5 : values_committed_data_deltas: values_committed.with_label_values(&["data", "delta"]),
2976 5 : gap_blocks_zeroed_on_rel_extend: register_int_counter!(
2977 5 : "pageserver_gap_blocks_zeroed_on_rel_extend",
2978 5 : "Total number of zero gap blocks written on relation extends"
2979 5 : )
2980 5 : .expect("failed to define a metric"),
2981 5 : }
2982 5 : });
2983 :
2984 109 : pub(crate) static PAGESERVER_TIMELINE_WAL_RECORDS_RECEIVED: Lazy<IntCounterVec> = Lazy::new(|| {
2985 109 : register_int_counter_vec!(
2986 : "pageserver_timeline_wal_records_received",
2987 : "Number of WAL records received per shard",
2988 109 : &["tenant_id", "shard_id", "timeline_id"]
2989 : )
2990 109 : .expect("failed to define a metric")
2991 109 : });
2992 :
2993 3 : pub(crate) static WAL_REDO_TIME: Lazy<Histogram> = Lazy::new(|| {
2994 3 : register_histogram!(
2995 : "pageserver_wal_redo_seconds",
2996 : "Time spent on WAL redo",
2997 3 : redo_histogram_time_buckets!()
2998 : )
2999 3 : .expect("failed to define a metric")
3000 3 : });
3001 :
3002 3 : pub(crate) static WAL_REDO_RECORDS_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
3003 3 : register_histogram!(
3004 : "pageserver_wal_redo_records_histogram",
3005 : "Histogram of number of records replayed per redo in the Postgres WAL redo process",
3006 3 : redo_histogram_count_buckets!(),
3007 : )
3008 3 : .expect("failed to define a metric")
3009 3 : });
3010 :
3011 3 : pub(crate) static WAL_REDO_BYTES_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
3012 3 : register_histogram!(
3013 : "pageserver_wal_redo_bytes_histogram",
3014 : "Histogram of number of records replayed per redo sent to Postgres",
3015 3 : redo_bytes_histogram_count_buckets!(),
3016 : )
3017 3 : .expect("failed to define a metric")
3018 3 : });
3019 :
3020 : // FIXME: isn't this redundant with WAL_REDO_RECORDS_HISTOGRAM, whose _sum already equals the total number of replayed records (its _count counts redo requests, not records)?
3021 3 : pub(crate) static WAL_REDO_RECORD_COUNTER: Lazy<IntCounter> = Lazy::new(|| {
3022 3 : register_int_counter!(
3023 : "pageserver_replayed_wal_records_total",
3024 : "Number of WAL records replayed in WAL redo process"
3025 : )
3026 3 : .unwrap()
3027 3 : });
3028 :
3029 : #[rustfmt::skip]
3030 4 : pub(crate) static WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
3031 4 : register_histogram!(
3032 : "pageserver_wal_redo_process_launch_duration",
3033 : "Histogram of the duration of successful WalRedoProcess::launch calls",
3034 4 : vec![
3035 : 0.0002, 0.0004, 0.0006, 0.0008, 0.0010,
3036 : 0.0020, 0.0040, 0.0060, 0.0080, 0.0100,
3037 : 0.0200, 0.0400, 0.0600, 0.0800, 0.1000,
3038 : 0.2000, 0.4000, 0.6000, 0.8000, 1.0000,
3039 : 1.5000, 2.0000, 2.5000, 3.0000, 4.0000, 10.0000
3040 : ],
3041 : )
3042 4 : .expect("failed to define a metric")
3043 4 : });
3044 :
3045 : pub(crate) struct WalRedoProcessCounters {
3046 : pub(crate) started: IntCounter,
3047 : pub(crate) killed_by_cause: EnumMap<WalRedoKillCause, IntCounter>,
3048 : pub(crate) active_stderr_logger_tasks_started: IntCounter,
3049 : pub(crate) active_stderr_logger_tasks_finished: IntCounter,
3050 : }
3051 :
3052 : #[derive(Debug, enum_map::Enum, strum_macros::IntoStaticStr)]
3053 : pub(crate) enum WalRedoKillCause {
3054 : WalRedoProcessDrop,
3055 : NoLeakChildDrop,
3056 : Startup,
3057 : }
3058 :
3059 : impl Default for WalRedoProcessCounters {
3060 4 : fn default() -> Self {
3061 4 : let started = register_int_counter!(
3062 : "pageserver_wal_redo_process_started_total",
3063 : "Number of WAL redo processes started",
3064 : )
3065 4 : .unwrap();
3066 :
3067 4 : let killed = register_int_counter_vec!(
3068 : "pageserver_wal_redo_process_stopped_total",
3069 : "Number of WAL redo processes stopped",
3070 4 : &["cause"],
3071 : )
3072 4 : .unwrap();
3073 :
3074 4 : let active_stderr_logger_tasks_started = register_int_counter!(
3075 : "pageserver_walredo_stderr_logger_tasks_started_total",
3076 : "Number of active walredo stderr logger tasks that have started",
3077 : )
3078 4 : .unwrap();
3079 :
3080 4 : let active_stderr_logger_tasks_finished = register_int_counter!(
3081 : "pageserver_walredo_stderr_logger_tasks_finished_total",
3082 : "Number of active walredo stderr logger tasks that have finished",
3083 : )
3084 4 : .unwrap();
3085 :
3086 : Self {
3087 4 : started,
3088 12 : killed_by_cause: EnumMap::from_array(std::array::from_fn(|i| {
3089 12 : let cause = WalRedoKillCause::from_usize(i);
3090 12 : let cause_str: &'static str = cause.into();
3091 12 : killed.with_label_values(&[cause_str])
3092 12 : })),
3093 4 : active_stderr_logger_tasks_started,
3094 4 : active_stderr_logger_tasks_finished,
3095 : }
3096 4 : }
3097 : }
3098 :
3099 : pub(crate) static WAL_REDO_PROCESS_COUNTERS: Lazy<WalRedoProcessCounters> =
3100 : Lazy::new(WalRedoProcessCounters::default);
3101 :
3102 : /// Similar to `prometheus::HistogramTimer` but does not record on drop.
3103 : pub(crate) struct StorageTimeMetricsTimer {
3104 : metrics: StorageTimeMetrics,
3105 : start: Instant,
3106 : stopped: Cell<bool>,
3107 : }
3108 :
3109 : impl StorageTimeMetricsTimer {
3110 1751 : fn new(metrics: StorageTimeMetrics) -> Self {
3111 : /*BEGIN_HADRON */
3112 : // record the active operation as the timer starts
3113 1751 : metrics.timeline_active_count.inc();
3114 : /*END_HADRON */
3115 1751 : Self {
3116 1751 : metrics,
3117 1751 : start: Instant::now(),
3118 1751 : stopped: Cell::new(false),
3119 1751 : }
3120 1751 : }
3121 :
3122 : /// Returns the elapsed duration of the timer.
3123 1750 : pub fn elapsed(&self) -> Duration {
3124 1750 : self.start.elapsed()
3125 1750 : }
3126 :
3127 : /// Record the time from creation to now and return it.
3128 1750 : pub fn stop_and_record(self) -> Duration {
3129 1750 : let duration = self.elapsed();
3130 1750 : let seconds = duration.as_secs_f64();
3131 1750 : self.metrics.timeline_sum.inc_by(seconds);
3132 1750 : self.metrics.timeline_count.inc();
3133 1750 : self.metrics.global_histogram.observe(seconds);
3134 : /* BEGIN_HADRON*/
3135 1750 : self.stopped.set(true);
3136 1750 : self.metrics.timeline_active_count.dec();
3137 : /*END_HADRON */
3138 1750 : duration
3139 1750 : }
3140 :
3141 : /// Turns this timer into one that will always record -- usually this means recording
3142 : /// even if an early `?` return path was taken in a function.
3143 385 : pub(crate) fn record_on_drop(self) -> AlwaysRecordingStorageTimeMetricsTimer {
3144 385 : AlwaysRecordingStorageTimeMetricsTimer(Some(self))
3145 385 : }
3146 : }
3147 :
3148 : /*BEGIN_HADRON */
3149 : impl Drop for StorageTimeMetricsTimer {
3150 1751 : fn drop(&mut self) {
3151 1751 : if !self.stopped.get() {
3152 1 : self.metrics.timeline_active_count.dec();
3153 1750 : }
3154 1751 : }
3155 : }
3156 : /*END_HADRON */
3157 :
3158 : pub(crate) struct AlwaysRecordingStorageTimeMetricsTimer(Option<StorageTimeMetricsTimer>);
3159 :
3160 : impl Drop for AlwaysRecordingStorageTimeMetricsTimer {
3161 385 : fn drop(&mut self) {
3162 385 : if let Some(inner) = self.0.take() {
3163 385 : inner.stop_and_record();
3164 385 : }
3165 385 : }
3166 : }
3167 :
3168 : impl AlwaysRecordingStorageTimeMetricsTimer {
3169 : /// Returns the elapsed duration of the timer.
3170 0 : pub fn elapsed(&self) -> Duration {
3171 0 : self.0.as_ref().expect("not dropped yet").elapsed()
3172 0 : }
3173 : }
3174 :
3175 : /// Timing facilities for a globally histogrammed metric, supplemented by a per-tenant and
3176 : /// per-timeline total sum and count.
3177 : #[derive(Clone, Debug)]
3178 : pub(crate) struct StorageTimeMetrics {
3179 : /// Sum of f64 seconds, per operation, tenant_id and timeline_id
3180 : timeline_sum: Counter,
3181 : /// Number of operations, per operation, tenant_id and timeline_id
3182 : timeline_count: IntCounter,
3183 : /*BEGIN_HADRON */
3184 : /// Number of active operations per operation, tenant_id, and timeline_id
3185 : timeline_active_count: IntGauge,
3186 : /*END_HADRON */
3187 : /// Global histogram having only the "operation" label.
3188 : global_histogram: Histogram,
3189 : }
3190 :
3191 : impl StorageTimeMetrics {
3192 2115 : pub fn new(
3193 2115 : operation: StorageTimeOperation,
3194 2115 : tenant_id: &str,
3195 2115 : shard_id: &str,
3196 2115 : timeline_id: &str,
3197 2115 : ) -> Self {
3198 2115 : let operation: &'static str = operation.into();
3199 :
3200 2115 : let timeline_sum = STORAGE_TIME_SUM_PER_TIMELINE
3201 2115 : .get_metric_with_label_values(&[operation, tenant_id, shard_id, timeline_id])
3202 2115 : .unwrap();
3203 2115 : let timeline_count = STORAGE_TIME_COUNT_PER_TIMELINE
3204 2115 : .get_metric_with_label_values(&[operation, tenant_id, shard_id, timeline_id])
3205 2115 : .unwrap();
3206 : /*BEGIN_HADRON */
3207 2115 : let timeline_active_count = STORAGE_ACTIVE_COUNT_PER_TIMELINE
3208 2115 : .get_metric_with_label_values(&[operation, tenant_id, shard_id, timeline_id])
3209 2115 : .unwrap();
3210 : /*END_HADRON */
3211 2115 : let global_histogram = STORAGE_TIME_GLOBAL
3212 2115 : .get_metric_with_label_values(&[operation])
3213 2115 : .unwrap();
3214 :
3215 2115 : StorageTimeMetrics {
3216 2115 : timeline_sum,
3217 2115 : timeline_count,
3218 2115 : timeline_active_count,
3219 2115 : global_histogram,
3220 2115 : }
3221 2115 : }
3222 :
3223 : /// Starts timing a new operation.
3224 : ///
3225 : /// Note: unlike `prometheus::HistogramTimer` the returned timer does not record on drop.
3226 1751 : pub fn start_timer(&self) -> StorageTimeMetricsTimer {
3227 1751 : StorageTimeMetricsTimer::new(self.clone())
3228 1751 : }
3229 : }
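// Illustrative usage (editor's sketch; assumes some `metrics: StorageTimeMetrics`,
// e.g. `TimelineMetrics::flush_time_histo`):
//
//     let timer = metrics.start_timer();
//     // ... perform the storage operation ...
//     let took = timer.stop_and_record(); // updates per-timeline sum/count and the global histogram
//
//     // In functions with early `?` returns, record unconditionally on drop instead:
//     let _guard = metrics.start_timer().record_on_drop();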
3230 :
3231 : pub(crate) struct TimelineMetrics {
3232 : tenant_id: String,
3233 : shard_id: String,
3234 : timeline_id: String,
3235 : pub flush_time_histo: StorageTimeMetrics,
3236 : pub flush_delay_histo: StorageTimeMetrics,
3237 : pub compact_time_histo: StorageTimeMetrics,
3238 : pub create_images_time_histo: StorageTimeMetrics,
3239 : pub logical_size_histo: StorageTimeMetrics,
3240 : pub imitate_logical_size_histo: StorageTimeMetrics,
3241 : pub load_layer_map_histo: StorageTimeMetrics,
3242 : pub garbage_collect_histo: StorageTimeMetrics,
3243 : pub find_gc_cutoffs_histo: StorageTimeMetrics,
3244 : pub last_record_lsn_gauge: IntGauge,
3245 : pub disk_consistent_lsn_gauge: IntGauge,
3246 : pub pitr_history_size: UIntGauge,
3247 : pub archival_size: UIntGauge,
3248 : pub layers_per_read: Histogram,
3249 : pub standby_horizon_gauge: IntGauge,
3250 : pub resident_physical_size_gauge: UIntGauge,
3251 : pub visible_physical_size_gauge: UIntGauge,
3252 : /// copy of LayeredTimeline.current_logical_size
3253 : pub current_logical_size_gauge: UIntGauge,
3254 : pub aux_file_size_gauge: IntGauge,
3255 : pub directory_entries_count_gauge: Lazy<UIntGauge, Box<dyn Send + Fn() -> UIntGauge>>,
3256 : pub evictions: IntCounter,
3257 : pub evictions_with_low_residence_duration: std::sync::RwLock<EvictionsWithLowResidenceDuration>,
3258 : /// Number of valid LSN leases.
3259 : pub valid_lsn_lease_count_gauge: UIntGauge,
3260 : pub wal_records_received: IntCounter,
3261 : pub storage_io_size: StorageIoSizeMetrics,
3262 : pub wait_lsn_in_progress_micros: GlobalAndPerTenantIntCounter,
3263 : pub wait_lsn_start_finish_counterpair: IntCounterPair,
3264 : pub wait_ondemand_download_time: wait_ondemand_download_time::WaitOndemandDownloadTimeSum,
3265 : shutdown: std::sync::atomic::AtomicBool,
3266 : }
3267 :
3268 : impl TimelineMetrics {
3269 235 : pub fn new(
3270 235 : tenant_shard_id: &TenantShardId,
3271 235 : timeline_id_raw: &TimelineId,
3272 235 : evictions_with_low_residence_duration_builder: EvictionsWithLowResidenceDurationBuilder,
3273 235 : ) -> Self {
3274 235 : let tenant_id = tenant_shard_id.tenant_id.to_string();
3275 235 : let shard_id = format!("{}", tenant_shard_id.shard_slug());
3276 235 : let timeline_id = timeline_id_raw.to_string();
3277 235 : let flush_time_histo = StorageTimeMetrics::new(
3278 235 : StorageTimeOperation::LayerFlush,
3279 235 : &tenant_id,
3280 235 : &shard_id,
3281 235 : &timeline_id,
3282 : );
3283 235 : let flush_delay_histo = StorageTimeMetrics::new(
3284 235 : StorageTimeOperation::LayerFlushDelay,
3285 235 : &tenant_id,
3286 235 : &shard_id,
3287 235 : &timeline_id,
3288 : );
3289 235 : let compact_time_histo = StorageTimeMetrics::new(
3290 235 : StorageTimeOperation::Compact,
3291 235 : &tenant_id,
3292 235 : &shard_id,
3293 235 : &timeline_id,
3294 : );
3295 235 : let create_images_time_histo = StorageTimeMetrics::new(
3296 235 : StorageTimeOperation::CreateImages,
3297 235 : &tenant_id,
3298 235 : &shard_id,
3299 235 : &timeline_id,
3300 : );
3301 235 : let logical_size_histo = StorageTimeMetrics::new(
3302 235 : StorageTimeOperation::LogicalSize,
3303 235 : &tenant_id,
3304 235 : &shard_id,
3305 235 : &timeline_id,
3306 : );
3307 235 : let imitate_logical_size_histo = StorageTimeMetrics::new(
3308 235 : StorageTimeOperation::ImitateLogicalSize,
3309 235 : &tenant_id,
3310 235 : &shard_id,
3311 235 : &timeline_id,
3312 : );
3313 235 : let load_layer_map_histo = StorageTimeMetrics::new(
3314 235 : StorageTimeOperation::LoadLayerMap,
3315 235 : &tenant_id,
3316 235 : &shard_id,
3317 235 : &timeline_id,
3318 : );
3319 235 : let garbage_collect_histo = StorageTimeMetrics::new(
3320 235 : StorageTimeOperation::Gc,
3321 235 : &tenant_id,
3322 235 : &shard_id,
3323 235 : &timeline_id,
3324 : );
3325 235 : let find_gc_cutoffs_histo = StorageTimeMetrics::new(
3326 235 : StorageTimeOperation::FindGcCutoffs,
3327 235 : &tenant_id,
3328 235 : &shard_id,
3329 235 : &timeline_id,
3330 : );
3331 235 : let last_record_lsn_gauge = LAST_RECORD_LSN
3332 235 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3333 235 : .unwrap();
3334 :
3335 235 : let disk_consistent_lsn_gauge = DISK_CONSISTENT_LSN
3336 235 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3337 235 : .unwrap();
3338 :
3339 235 : let pitr_history_size = PITR_HISTORY_SIZE
3340 235 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3341 235 : .unwrap();
3342 :
3343 235 : let archival_size = TIMELINE_ARCHIVE_SIZE
3344 235 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3345 235 : .unwrap();
3346 :
3347 235 : let layers_per_read = LAYERS_PER_READ
3348 235 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3349 235 : .unwrap();
3350 :
3351 235 : let standby_horizon_gauge = STANDBY_HORIZON
3352 235 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3353 235 : .unwrap();
3354 235 : let resident_physical_size_gauge = RESIDENT_PHYSICAL_SIZE
3355 235 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3356 235 : .unwrap();
3357 235 : let visible_physical_size_gauge = VISIBLE_PHYSICAL_SIZE
3358 235 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3359 235 : .unwrap();
3360 : // TODO: we shouldn't expose this metric
3361 235 : let current_logical_size_gauge = CURRENT_LOGICAL_SIZE
3362 235 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3363 235 : .unwrap();
3364 235 : let aux_file_size_gauge = AUX_FILE_SIZE
3365 235 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3366 235 : .unwrap();
3367 : // TODO use impl Trait syntax here once we have ability to use it: https://github.com/rust-lang/rust/issues/63065
3368 235 : let directory_entries_count_gauge_closure = {
3369 235 : let tenant_shard_id = *tenant_shard_id;
3370 235 : let timeline_id_raw = *timeline_id_raw;
3371 0 : move || {
3372 0 : let tenant_id = tenant_shard_id.tenant_id.to_string();
3373 0 : let shard_id = format!("{}", tenant_shard_id.shard_slug());
3374 0 : let timeline_id = timeline_id_raw.to_string();
3375 0 : let gauge: UIntGauge = DIRECTORY_ENTRIES_COUNT
3376 0 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3377 0 : .unwrap();
3378 0 : gauge
3379 0 : }
3380 : };
3381 235 : let directory_entries_count_gauge: Lazy<UIntGauge, Box<dyn Send + Fn() -> UIntGauge>> =
3382 235 : Lazy::new(Box::new(directory_entries_count_gauge_closure));
3383 235 : let evictions = EVICTIONS
3384 235 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3385 235 : .unwrap();
3386 235 : let evictions_with_low_residence_duration = evictions_with_low_residence_duration_builder
3387 235 : .build(&tenant_id, &shard_id, &timeline_id);
3388 :
3389 235 : let valid_lsn_lease_count_gauge = VALID_LSN_LEASE_COUNT
3390 235 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3391 235 : .unwrap();
3392 :
3393 235 : let wal_records_received = PAGESERVER_TIMELINE_WAL_RECORDS_RECEIVED
3394 235 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3395 235 : .unwrap();
3396 :
3397 235 : let storage_io_size = StorageIoSizeMetrics::new(&tenant_id, &shard_id, &timeline_id);
3398 :
3399 235 : let wait_lsn_in_progress_micros = GlobalAndPerTenantIntCounter {
3400 235 : global: WAIT_LSN_IN_PROGRESS_GLOBAL_MICROS.clone(),
3401 235 : per_tenant: WAIT_LSN_IN_PROGRESS_MICROS
3402 235 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3403 235 : .unwrap(),
3404 235 : };
3405 :
3406 235 : let wait_lsn_start_finish_counterpair = WAIT_LSN_START_FINISH_COUNTERPAIR
3407 235 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3408 235 : .unwrap();
3409 :
3410 235 : let wait_ondemand_download_time =
3411 235 : wait_ondemand_download_time::WaitOndemandDownloadTimeSum::new(
3412 235 : &tenant_id,
3413 235 : &shard_id,
3414 235 : &timeline_id,
3415 : );
3416 :
3417 235 : TIMELINE_STATE_METRIC.with_label_values(&["active"]).inc();
3418 :
3419 235 : TimelineMetrics {
3420 235 : tenant_id,
3421 235 : shard_id,
3422 235 : timeline_id,
3423 235 : flush_time_histo,
3424 235 : flush_delay_histo,
3425 235 : compact_time_histo,
3426 235 : create_images_time_histo,
3427 235 : logical_size_histo,
3428 235 : imitate_logical_size_histo,
3429 235 : garbage_collect_histo,
3430 235 : find_gc_cutoffs_histo,
3431 235 : load_layer_map_histo,
3432 235 : last_record_lsn_gauge,
3433 235 : disk_consistent_lsn_gauge,
3434 235 : pitr_history_size,
3435 235 : archival_size,
3436 235 : layers_per_read,
3437 235 : standby_horizon_gauge,
3438 235 : resident_physical_size_gauge,
3439 235 : visible_physical_size_gauge,
3440 235 : current_logical_size_gauge,
3441 235 : aux_file_size_gauge,
3442 235 : directory_entries_count_gauge,
3443 235 : evictions,
3444 235 : evictions_with_low_residence_duration: std::sync::RwLock::new(
3445 235 : evictions_with_low_residence_duration,
3446 235 : ),
3447 235 : storage_io_size,
3448 235 : valid_lsn_lease_count_gauge,
3449 235 : wal_records_received,
3450 235 : wait_lsn_in_progress_micros,
3451 235 : wait_lsn_start_finish_counterpair,
3452 235 : wait_ondemand_download_time,
3453 235 : shutdown: std::sync::atomic::AtomicBool::default(),
3454 235 : }
3455 235 : }
3456 :
3457 805 : pub(crate) fn record_new_file_metrics(&self, sz: u64) {
3458 805 : self.resident_physical_size_add(sz);
3459 805 : }
3460 :
3461 277 : pub(crate) fn resident_physical_size_sub(&self, sz: u64) {
3462 277 : self.resident_physical_size_gauge.sub(sz);
3463 277 : crate::metrics::RESIDENT_PHYSICAL_SIZE_GLOBAL.sub(sz);
3464 277 : }
3465 :
3466 873 : pub(crate) fn resident_physical_size_add(&self, sz: u64) {
3467 873 : self.resident_physical_size_gauge.add(sz);
3468 873 : crate::metrics::RESIDENT_PHYSICAL_SIZE_GLOBAL.add(sz);
3469 873 : }
3470 :
3471 5 : pub(crate) fn resident_physical_size_get(&self) -> u64 {
3472 5 : self.resident_physical_size_gauge.get()
3473 5 : }
3474 :
3475 : /// Generates TIMELINE_LAYER labels for a persistent layer.
3476 1342 : fn make_layer_labels(&self, layer_desc: &PersistentLayerDesc) -> [&str; 5] {
3477 1342 : let level = match LayerMap::is_l0(&layer_desc.key_range, layer_desc.is_delta()) {
3478 715 : true => LayerLevel::L0,
3479 627 : false => LayerLevel::L1,
3480 : };
3481 1342 : let kind = match layer_desc.is_delta() {
3482 1107 : true => LayerKind::Delta,
3483 235 : false => LayerKind::Image,
3484 : };
3485 1342 : [
3486 1342 : &self.tenant_id,
3487 1342 : &self.shard_id,
3488 1342 : &self.timeline_id,
3489 1342 : level.into(),
3490 1342 : kind.into(),
3491 1342 : ]
3492 1342 : }
3493 :
3494 : /// Generates TIMELINE_LAYER labels for a frozen ephemeral layer.
3495 1193 : fn make_frozen_layer_labels(&self, _layer: &InMemoryLayer) -> [&str; 5] {
3496 1193 : [
3497 1193 : &self.tenant_id,
3498 1193 : &self.shard_id,
3499 1193 : &self.timeline_id,
3500 1193 : LayerLevel::Frozen.into(),
3501 1193 : LayerKind::Delta.into(), // by definition
3502 1193 : ]
3503 1193 : }
3504 :
3505 : /// Removes a frozen ephemeral layer from TIMELINE_LAYER metrics.
3506 596 : pub fn dec_frozen_layer(&self, layer: &InMemoryLayer) {
3507 596 : assert!(matches!(layer.info(), InMemoryLayerInfo::Frozen { .. }));
3508 596 : let labels = self.make_frozen_layer_labels(layer);
3509 596 : let size = layer.len();
3510 596 : TIMELINE_LAYER_COUNT
3511 596 : .get_metric_with_label_values(&labels)
3512 596 : .unwrap()
3513 596 : .dec();
3514 596 : TIMELINE_LAYER_SIZE
3515 596 : .get_metric_with_label_values(&labels)
3516 596 : .unwrap()
3517 596 : .sub(size);
3518 596 : }
3519 :
3520 : /// Adds a frozen ephemeral layer to TIMELINE_LAYER metrics.
3521 597 : pub fn inc_frozen_layer(&self, layer: &InMemoryLayer) {
3522 597 : assert!(matches!(layer.info(), InMemoryLayerInfo::Frozen { .. }));
3523 597 : let labels = self.make_frozen_layer_labels(layer);
3524 597 : let size = layer.len();
3525 597 : TIMELINE_LAYER_COUNT
3526 597 : .get_metric_with_label_values(&labels)
3527 597 : .unwrap()
3528 597 : .inc();
3529 597 : TIMELINE_LAYER_SIZE
3530 597 : .get_metric_with_label_values(&labels)
3531 597 : .unwrap()
3532 597 : .add(size);
3533 597 : }
3534 :
3535 : /// Removes a persistent layer from TIMELINE_LAYER metrics.
3536 352 : pub fn dec_layer(&self, layer_desc: &PersistentLayerDesc) {
3537 352 : let labels = self.make_layer_labels(layer_desc);
3538 352 : TIMELINE_LAYER_COUNT
3539 352 : .get_metric_with_label_values(&labels)
3540 352 : .unwrap()
3541 352 : .dec();
3542 352 : TIMELINE_LAYER_SIZE
3543 352 : .get_metric_with_label_values(&labels)
3544 352 : .unwrap()
3545 352 : .sub(layer_desc.file_size);
3546 352 : }
3547 :
3548 : /// Adds a persistent layer to TIMELINE_LAYER metrics.
3549 990 : pub fn inc_layer(&self, layer_desc: &PersistentLayerDesc) {
3550 990 : let labels = self.make_layer_labels(layer_desc);
3551 990 : TIMELINE_LAYER_COUNT
3552 990 : .get_metric_with_label_values(&labels)
3553 990 : .unwrap()
3554 990 : .inc();
3555 990 : TIMELINE_LAYER_SIZE
3556 990 : .get_metric_with_label_values(&labels)
3557 990 : .unwrap()
3558 990 : .add(layer_desc.file_size);
3559 990 : }
3560 :
3561 5 : pub(crate) fn shutdown(&self) {
3562 5 : let was_shutdown = self
3563 5 : .shutdown
3564 5 : .swap(true, std::sync::atomic::Ordering::Relaxed);
3565 :
3566 5 : if was_shutdown {
3567 : // This happens on tenant deletion because the tenant first shuts down timelines, then
3568 : // invokes timeline deletion, which shuts down the timeline again.
3569 : // TODO: this can be removed once https://github.com/neondatabase/neon/issues/5080 is fixed.
3570 0 : return;
3571 5 : }
3572 :
3573 5 : TIMELINE_STATE_METRIC.with_label_values(&["active"]).dec();
3574 :
3575 5 : let tenant_id = &self.tenant_id;
3576 5 : let timeline_id = &self.timeline_id;
3577 5 : let shard_id = &self.shard_id;
3578 5 : let _ = LAST_RECORD_LSN.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3579 5 : let _ = DISK_CONSISTENT_LSN.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3580 5 : let _ = STANDBY_HORIZON.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3581 5 : {
3582 5 : RESIDENT_PHYSICAL_SIZE_GLOBAL.sub(self.resident_physical_size_get());
3583 5 : let _ = RESIDENT_PHYSICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3584 5 : }
3585 5 : let _ = VISIBLE_PHYSICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3586 5 : let _ = CURRENT_LOGICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3587 5 : if let Some(metric) = Lazy::get(&DIRECTORY_ENTRIES_COUNT) {
3588 0 : let _ = metric.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3589 5 : }
3590 :
3591 5 : let _ = TIMELINE_ARCHIVE_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3592 5 : let _ = PITR_HISTORY_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3593 :
3594 15 : for ref level in LayerLevel::iter() {
3595 30 : for ref kind in LayerKind::iter() {
3596 30 : let labels: [&str; 5] =
3597 30 : [tenant_id, shard_id, timeline_id, level.into(), kind.into()];
3598 30 : let _ = TIMELINE_LAYER_SIZE.remove_label_values(&labels);
3599 30 : let _ = TIMELINE_LAYER_COUNT.remove_label_values(&labels);
3600 30 : }
3601 : }
3602 :
3603 5 : let _ = LAYERS_PER_READ.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3604 :
3605 5 : let _ = EVICTIONS.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3606 5 : let _ = AUX_FILE_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3607 5 : let _ = VALID_LSN_LEASE_COUNT.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3608 :
3609 5 : self.evictions_with_low_residence_duration
3610 5 : .write()
3611 5 : .unwrap()
3612 5 : .remove(tenant_id, shard_id, timeline_id);
3613 :
3614 : // The following metrics are born outside of the TimelineMetrics lifecycle but still
3615 : // removed at the end of it. The idea is to have the metrics outlive the
3616 : // entity during which they're observed, e.g., the smgr metrics shall
3617 : // outlive an individual smgr connection, but not the timeline.
3618 :
3619 50 : for op in StorageTimeOperation::VARIANTS {
3620 45 : let _ = STORAGE_TIME_SUM_PER_TIMELINE.remove_label_values(&[
3621 45 : op,
3622 45 : tenant_id,
3623 45 : shard_id,
3624 45 : timeline_id,
3625 45 : ]);
3626 45 : let _ = STORAGE_TIME_COUNT_PER_TIMELINE.remove_label_values(&[
3627 45 : op,
3628 45 : tenant_id,
3629 45 : shard_id,
3630 45 : timeline_id,
3631 45 : ]);
3632 45 : /* BEGIN_HADRON */
3633 45 : let _ = STORAGE_ACTIVE_COUNT_PER_TIMELINE.remove_label_values(&[
3634 45 : op,
3635 45 : tenant_id,
3636 45 : shard_id,
3637 45 : timeline_id,
3638 45 : ]);
3639 45 : /*END_HADRON */
3640 45 : }
3641 :
3642 15 : for op in StorageIoSizeOperation::VARIANTS {
3643 10 : let _ = STORAGE_IO_SIZE.remove_label_values(&[op, tenant_id, shard_id, timeline_id]);
3644 10 : }
3645 :
3646 : let _ =
3647 5 : WAIT_LSN_IN_PROGRESS_MICROS.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3648 :
3649 5 : {
3650 5 : let mut res = [Ok(()), Ok(())];
3651 5 : WAIT_LSN_START_FINISH_COUNTERPAIR
3652 5 : .remove_label_values(&mut res, &[tenant_id, shard_id, timeline_id]);
3653 5 : }
3654 :
3655 5 : wait_ondemand_download_time::shutdown_timeline(tenant_id, shard_id, timeline_id);
3656 :
3657 5 : let _ = SMGR_QUERY_STARTED_PER_TENANT_TIMELINE.remove_label_values(&[
3658 5 : SmgrQueryType::GetPageAtLsn.into(),
3659 5 : tenant_id,
3660 5 : shard_id,
3661 5 : timeline_id,
3662 5 : ]);
3663 5 : let _ = SMGR_QUERY_TIME_PER_TENANT_TIMELINE.remove_label_values(&[
3664 5 : SmgrQueryType::GetPageAtLsn.into(),
3665 5 : tenant_id,
3666 5 : shard_id,
3667 5 : timeline_id,
3668 5 : ]);
3669 5 : let _ = PAGE_SERVICE_BATCH_SIZE_PER_TENANT_TIMELINE.remove_label_values(&[
3670 5 : tenant_id,
3671 5 : shard_id,
3672 5 : timeline_id,
3673 5 : ]);
3674 5 : let _ = PAGESERVER_TIMELINE_WAL_RECORDS_RECEIVED.remove_label_values(&[
3675 5 : tenant_id,
3676 5 : shard_id,
3677 5 : timeline_id,
3678 5 : ]);
3679 5 : let _ = PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS.remove_label_values(&[
3680 5 : tenant_id,
3681 5 : shard_id,
3682 5 : timeline_id,
3683 5 : ]);
3684 5 : let _ = PAGE_SERVICE_SMGR_BATCH_WAIT_TIME.remove_label_values(&[
3685 5 : tenant_id,
3686 5 : shard_id,
3687 5 : timeline_id,
3688 5 : ]);
3689 :
3690 35 : for reason in GetPageBatchBreakReason::iter() {
3691 35 : let _ = PAGE_SERVICE_BATCH_BREAK_REASON_PER_TENANT_TIMELINE.remove_label_values(&[
3692 35 : tenant_id,
3693 35 : shard_id,
3694 35 : timeline_id,
3695 35 : reason.into(),
3696 35 : ]);
3697 35 : }
3698 5 : }
3699 : }
3700 :
3701 3 : pub(crate) fn remove_tenant_metrics(tenant_shard_id: &TenantShardId) {
3702 3 : let tid = tenant_shard_id.tenant_id.to_string();
3703 3 : let shard_id = tenant_shard_id.shard_slug().to_string();
3704 :
3705 : // Only shard zero deals in synthetic sizes
3706 3 : if tenant_shard_id.is_shard_zero() {
3707 3 : let _ = TENANT_SYNTHETIC_SIZE_METRIC.remove_label_values(&[&tid]);
3708 3 : }
3709 3 : let _ = TENANT_OFFLOADED_TIMELINES.remove_label_values(&[&tid, &shard_id]);
3710 :
3711 3 : tenant_throttling::remove_tenant_metrics(tenant_shard_id);
3712 :
3713 : // we leave the BROKEN_TENANTS_SET entry if any
3714 3 : }
3715 :
3716 : /// Maintain a per timeline gauge in addition to the global gauge.
3717 : pub(crate) struct PerTimelineRemotePhysicalSizeGauge {
3718 : last_set: AtomicU64,
3719 : gauge: UIntGauge,
3720 : }
3721 :
3722 : impl PerTimelineRemotePhysicalSizeGauge {
3723 240 : fn new(per_timeline_gauge: UIntGauge) -> Self {
3724 240 : Self {
3725 240 : last_set: AtomicU64::new(0),
3726 240 : gauge: per_timeline_gauge,
3727 240 : }
3728 240 : }
3729 1005 : pub(crate) fn set(&self, sz: u64) {
3730 1005 : self.gauge.set(sz);
3731 1005 : let prev = self.last_set.swap(sz, std::sync::atomic::Ordering::Relaxed);
3732 1005 : if sz < prev {
3733 20 : REMOTE_PHYSICAL_SIZE_GLOBAL.sub(prev - sz);
3734 985 : } else {
3735 985 : REMOTE_PHYSICAL_SIZE_GLOBAL.add(sz - prev);
3736 985 : };
3737 1005 : }
3738 1 : pub(crate) fn get(&self) -> u64 {
3739 1 : self.gauge.get()
3740 1 : }
3741 : }
3742 :
3743 : impl Drop for PerTimelineRemotePhysicalSizeGauge {
3744 10 : fn drop(&mut self) {
3745 10 : REMOTE_PHYSICAL_SIZE_GLOBAL.sub(self.last_set.load(std::sync::atomic::Ordering::Relaxed));
3746 10 : }
3747 : }
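// Editor's sketch (not in the original source) of the delta accounting above:
// `set()` stores the per-timeline value absolutely and adjusts the global gauge
// by the difference from the last reported value; `drop()` gives the remainder
// back to the global gauge.
//
//     gauge.set(100); // per-timeline = 100, REMOTE_PHYSICAL_SIZE_GLOBAL += 100
//     gauge.set(60);  // per-timeline =  60, REMOTE_PHYSICAL_SIZE_GLOBAL -=  40
//     drop(gauge);    //                     REMOTE_PHYSICAL_SIZE_GLOBAL -=  60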
3748 :
3749 : pub(crate) struct RemoteTimelineClientMetrics {
3750 : tenant_id: String,
3751 : shard_id: String,
3752 : timeline_id: String,
3753 : pub(crate) remote_physical_size_gauge: PerTimelineRemotePhysicalSizeGauge,
3754 : calls: Mutex<HashMap<(&'static str, &'static str), IntCounterPair>>,
3755 : bytes_started_counter: Mutex<HashMap<(&'static str, &'static str), IntCounter>>,
3756 : bytes_finished_counter: Mutex<HashMap<(&'static str, &'static str), IntCounter>>,
3757 : pub(crate) projected_remote_consistent_lsn_gauge: UIntGauge,
3758 : }
3759 :
3760 : impl RemoteTimelineClientMetrics {
3761 240 : pub fn new(tenant_shard_id: &TenantShardId, timeline_id: &TimelineId) -> Self {
3762 240 : let tenant_id_str = tenant_shard_id.tenant_id.to_string();
3763 240 : let shard_id_str = format!("{}", tenant_shard_id.shard_slug());
3764 240 : let timeline_id_str = timeline_id.to_string();
3765 :
3766 240 : let remote_physical_size_gauge = PerTimelineRemotePhysicalSizeGauge::new(
3767 240 : REMOTE_PHYSICAL_SIZE
3768 240 : .get_metric_with_label_values(&[&tenant_id_str, &shard_id_str, &timeline_id_str])
3769 240 : .unwrap(),
3770 : );
3771 :
3772 240 : let projected_remote_consistent_lsn_gauge = PROJECTED_REMOTE_CONSISTENT_LSN
3773 240 : .get_metric_with_label_values(&[&tenant_id_str, &shard_id_str, &timeline_id_str])
3774 240 : .unwrap();
3775 :
3776 240 : RemoteTimelineClientMetrics {
3777 240 : tenant_id: tenant_id_str,
3778 240 : shard_id: shard_id_str,
3779 240 : timeline_id: timeline_id_str,
3780 240 : calls: Mutex::new(HashMap::default()),
3781 240 : bytes_started_counter: Mutex::new(HashMap::default()),
3782 240 : bytes_finished_counter: Mutex::new(HashMap::default()),
3783 240 : remote_physical_size_gauge,
3784 240 : projected_remote_consistent_lsn_gauge,
3785 240 : }
3786 240 : }
3787 :
3788 1667 : pub fn remote_operation_time(
3789 1667 : &self,
3790 1667 : task_kind: Option<TaskKind>,
3791 1667 : file_kind: &RemoteOpFileKind,
3792 1667 : op_kind: &RemoteOpKind,
3793 1667 : status: &'static str,
3794 1667 : ) -> Histogram {
3795 1667 : REMOTE_TIMELINE_CLIENT_COMPLETION_LATENCY
3796 1667 : .get_metric_with_label_values(&[
3797 1667 : task_kind.as_ref().map(|tk| tk.into()).unwrap_or("unknown"),
3798 1667 : file_kind.as_str(),
3799 1667 : op_kind.as_str(),
3800 1667 : status,
3801 : ])
3802 1667 : .unwrap()
3803 1667 : }
3804 :
3805 3831 : fn calls_counter_pair(
3806 3831 : &self,
3807 3831 : file_kind: &RemoteOpFileKind,
3808 3831 : op_kind: &RemoteOpKind,
3809 3831 : ) -> IntCounterPair {
3810 3831 : let mut guard = self.calls.lock().unwrap();
3811 3831 : let key = (file_kind.as_str(), op_kind.as_str());
3812 3831 : let metric = guard.entry(key).or_insert_with(move || {
3813 431 : REMOTE_TIMELINE_CLIENT_CALLS
3814 431 : .get_metric_with_label_values(&[
3815 431 : &self.tenant_id,
3816 431 : &self.shard_id,
3817 431 : &self.timeline_id,
3818 431 : key.0,
3819 431 : key.1,
3820 431 : ])
3821 431 : .unwrap()
3822 431 : });
3823 3831 : metric.clone()
3824 3831 : }
3825 :
3826 895 : fn bytes_started_counter(
3827 895 : &self,
3828 895 : file_kind: &RemoteOpFileKind,
3829 895 : op_kind: &RemoteOpKind,
3830 895 : ) -> IntCounter {
3831 895 : let mut guard = self.bytes_started_counter.lock().unwrap();
3832 895 : let key = (file_kind.as_str(), op_kind.as_str());
3833 895 : let metric = guard.entry(key).or_insert_with(move || {
3834 170 : REMOTE_TIMELINE_CLIENT_BYTES_STARTED_COUNTER
3835 170 : .get_metric_with_label_values(&[
3836 170 : &self.tenant_id,
3837 170 : &self.shard_id,
3838 170 : &self.timeline_id,
3839 170 : key.0,
3840 170 : key.1,
3841 170 : ])
3842 170 : .unwrap()
3843 170 : });
3844 895 : metric.clone()
3845 895 : }
3846 :
3847 1775 : fn bytes_finished_counter(
3848 1775 : &self,
3849 1775 : file_kind: &RemoteOpFileKind,
3850 1775 : op_kind: &RemoteOpKind,
3851 1775 : ) -> IntCounter {
3852 1775 : let mut guard = self.bytes_finished_counter.lock().unwrap();
3853 1775 : let key = (file_kind.as_str(), op_kind.as_str());
3854 1775 : let metric = guard.entry(key).or_insert_with(move || {
3855 170 : REMOTE_TIMELINE_CLIENT_BYTES_FINISHED_COUNTER
3856 170 : .get_metric_with_label_values(&[
3857 170 : &self.tenant_id,
3858 170 : &self.shard_id,
3859 170 : &self.timeline_id,
3860 170 : key.0,
3861 170 : key.1,
3862 170 : ])
3863 170 : .unwrap()
3864 170 : });
3865 1775 : metric.clone()
3866 1775 : }
3867 : }
3868 :
3869 : #[cfg(test)]
3870 : impl RemoteTimelineClientMetrics {
3871 3 : pub fn get_bytes_started_counter_value(
3872 3 : &self,
3873 3 : file_kind: &RemoteOpFileKind,
3874 3 : op_kind: &RemoteOpKind,
3875 3 : ) -> Option<u64> {
3876 3 : let guard = self.bytes_started_counter.lock().unwrap();
3877 3 : let key = (file_kind.as_str(), op_kind.as_str());
3878 3 : guard.get(&key).map(|counter| counter.get())
3879 3 : }
3880 :
3881 3 : pub fn get_bytes_finished_counter_value(
3882 3 : &self,
3883 3 : file_kind: &RemoteOpFileKind,
3884 3 : op_kind: &RemoteOpKind,
3885 3 : ) -> Option<u64> {
3886 3 : let guard = self.bytes_finished_counter.lock().unwrap();
3887 3 : let key = (file_kind.as_str(), op_kind.as_str());
3888 3 : guard.get(&key).map(|counter| counter.get())
3889 3 : }
3890 : }
3891 :
3892 : /// See [`RemoteTimelineClientMetrics::call_begin`].
3893 : #[must_use]
3894 : pub(crate) struct RemoteTimelineClientCallMetricGuard {
3895 : /// Decremented on drop.
3896 : calls_counter_pair: Option<IntCounterPair>,
3897 : /// If Some(), this references the bytes_finished metric, and we increment it by the given `u64` on drop.
3898 : bytes_finished: Option<(IntCounter, u64)>,
3899 : }
3900 :
3901 : impl RemoteTimelineClientCallMetricGuard {
3902 : /// Consume this guard object without performing the metric updates it would do on `drop()`.
3903 : /// The caller vouches to do the metric updates manually.
3904 1953 : pub fn will_decrement_manually(mut self) {
3905 : let RemoteTimelineClientCallMetricGuard {
3906 1953 : calls_counter_pair,
3907 1953 : bytes_finished,
3908 1953 : } = &mut self;
3909 1953 : calls_counter_pair.take();
3910 1953 : bytes_finished.take();
3911 1953 : }
3912 : }
3913 :
3914 : impl Drop for RemoteTimelineClientCallMetricGuard {
3915 1970 : fn drop(&mut self) {
3916 : let RemoteTimelineClientCallMetricGuard {
3917 1970 : calls_counter_pair,
3918 1970 : bytes_finished,
3919 1970 : } = self;
3920 1970 : if let Some(guard) = calls_counter_pair.take() {
3921 17 : guard.dec();
3922 1953 : }
3923 1970 : if let Some((bytes_finished_metric, value)) = bytes_finished {
3924 0 : bytes_finished_metric.inc_by(*value);
3925 1970 : }
3926 1970 : }
3927 : }
3928 :
3929 : /// The enum variants communicate to the [`RemoteTimelineClientMetrics`] whether to
3930 : /// track the byte size of this call in applicable metric(s).
3931 : pub(crate) enum RemoteTimelineClientMetricsCallTrackSize {
3932 : /// Do not account for this call's byte size in any metrics.
3933 : /// The `reason` field is there to make the call sites self-documenting
3934 : /// about why they don't need the metric.
3935 : DontTrackSize { reason: &'static str },
3936 : /// Track the byte size of the call in applicable metric(s).
3937 : Bytes(u64),
3938 : }
3939 :
3940 : impl RemoteTimelineClientMetrics {
3941 : /// Update the metrics that change when a call to the remote timeline client instance starts.
3942 : ///
3943 : /// Drop the returned guard object once the operation is finished to update the corresponding metrics that track completions.
3944 : /// Or, use [`RemoteTimelineClientCallMetricGuard::will_decrement_manually`] and [`call_end`](Self::call_end) if that
3945 : /// is more suitable.
3946 : /// Never do both.
3947 1970 : pub(crate) fn call_begin(
3948 1970 : &self,
3949 1970 : file_kind: &RemoteOpFileKind,
3950 1970 : op_kind: &RemoteOpKind,
3951 1970 : size: RemoteTimelineClientMetricsCallTrackSize,
3952 1970 : ) -> RemoteTimelineClientCallMetricGuard {
3953 1970 : let calls_counter_pair = self.calls_counter_pair(file_kind, op_kind);
3954 1970 : calls_counter_pair.inc();
3955 :
3956 1970 : let bytes_finished = match size {
3957 1075 : RemoteTimelineClientMetricsCallTrackSize::DontTrackSize { reason: _reason } => {
3958 : // nothing to do
3959 1075 : None
3960 : }
3961 895 : RemoteTimelineClientMetricsCallTrackSize::Bytes(size) => {
3962 895 : self.bytes_started_counter(file_kind, op_kind).inc_by(size);
3963 895 : let finished_counter = self.bytes_finished_counter(file_kind, op_kind);
3964 895 : Some((finished_counter, size))
3965 : }
3966 : };
3967 1970 : RemoteTimelineClientCallMetricGuard {
3968 1970 : calls_counter_pair: Some(calls_counter_pair),
3969 1970 : bytes_finished,
3970 1970 : }
3971 1970 : }
3972 :
3973 : /// Manually update the metrics that track completions, instead of using the guard object.
3974 : /// Using the guard object is generally preferable.
3975 : /// See [`call_begin`](Self::call_begin) for more context.
3976 1861 : pub(crate) fn call_end(
3977 1861 : &self,
3978 1861 : file_kind: &RemoteOpFileKind,
3979 1861 : op_kind: &RemoteOpKind,
3980 1861 : size: RemoteTimelineClientMetricsCallTrackSize,
3981 1861 : ) {
3982 1861 : let calls_counter_pair = self.calls_counter_pair(file_kind, op_kind);
3983 1861 : calls_counter_pair.dec();
3984 1861 : match size {
3985 981 : RemoteTimelineClientMetricsCallTrackSize::DontTrackSize { reason: _reason } => {}
3986 880 : RemoteTimelineClientMetricsCallTrackSize::Bytes(size) => {
3987 880 : self.bytes_finished_counter(file_kind, op_kind).inc_by(size);
3988 880 : }
3989 : }
3990 1861 : }
3991 : }
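// Illustrative usage (editor's sketch; `m`, `file_kind`, `op_kind`, `upload`,
// `buf` and `len` are assumptions for the example):
//
//     // Variant A: let the guard decrement the calls pair / account bytes on drop.
//     let guard = m.call_begin(&file_kind, &op_kind,
//         RemoteTimelineClientMetricsCallTrackSize::Bytes(len));
//     upload(&buf)?;
//     drop(guard);
//
//     // Variant B: opt out of the guard and update the completion metrics manually later.
//     let guard = m.call_begin(&file_kind, &op_kind,
//         RemoteTimelineClientMetricsCallTrackSize::DontTrackSize { reason: "example" });
//     guard.will_decrement_manually();
//     // ... on the completion path:
//     m.call_end(&file_kind, &op_kind,
//         RemoteTimelineClientMetricsCallTrackSize::DontTrackSize { reason: "example" });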
3992 :
3993 : impl Drop for RemoteTimelineClientMetrics {
3994 10 : fn drop(&mut self) {
3995 : let RemoteTimelineClientMetrics {
3996 10 : tenant_id,
3997 10 : shard_id,
3998 10 : timeline_id,
3999 10 : remote_physical_size_gauge,
4000 10 : calls,
4001 10 : bytes_started_counter,
4002 10 : bytes_finished_counter,
4003 10 : projected_remote_consistent_lsn_gauge,
4004 10 : } = self;
4005 12 : for ((a, b), _) in calls.get_mut().unwrap().drain() {
4006 12 : let mut res = [Ok(()), Ok(())];
4007 12 : REMOTE_TIMELINE_CLIENT_CALLS
4008 12 : .remove_label_values(&mut res, &[tenant_id, shard_id, timeline_id, a, b]);
4009 12 : // don't care about results
4010 12 : }
4011 10 : for ((a, b), _) in bytes_started_counter.get_mut().unwrap().drain() {
4012 3 : let _ = REMOTE_TIMELINE_CLIENT_BYTES_STARTED_COUNTER.remove_label_values(&[
4013 3 : tenant_id,
4014 3 : shard_id,
4015 3 : timeline_id,
4016 3 : a,
4017 3 : b,
4018 3 : ]);
4019 3 : }
4020 10 : for ((a, b), _) in bytes_finished_counter.get_mut().unwrap().drain() {
4021 3 : let _ = REMOTE_TIMELINE_CLIENT_BYTES_FINISHED_COUNTER.remove_label_values(&[
4022 3 : tenant_id,
4023 3 : shard_id,
4024 3 : timeline_id,
4025 3 : a,
4026 3 : b,
4027 3 : ]);
4028 3 : }
4029 10 : {
4030 10 : let _ = remote_physical_size_gauge; // use to avoid 'unused' warning in desctructuring above
4031 10 : let _ = REMOTE_PHYSICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
4032 10 : }
4033 10 : {
4034 10 : let _ = projected_remote_consistent_lsn_gauge;
4035 10 : let _ = PROJECTED_REMOTE_CONSISTENT_LSN.remove_label_values(&[
4036 10 : tenant_id,
4037 10 : shard_id,
4038 10 : timeline_id,
4039 10 : ]);
4040 10 : }
4041 10 : }
4042 : }
4043 :
4044 : /// Extension trait for futures of remote storage operations: measures the time the
4045 : /// operation takes and records the duration and success/failure as a prometheus metric.
4046 : pub(crate) trait MeasureRemoteOp<O, E>: Sized + Future<Output = Result<O, E>> {
4047 1681 : async fn measure_remote_op(
4048 1681 : self,
4049 1681 : task_kind: Option<TaskKind>, // not all caller contexts have a RequestContext / TaskKind handy
4050 1681 : file_kind: RemoteOpFileKind,
4051 1681 : op: RemoteOpKind,
4052 1681 : metrics: Arc<RemoteTimelineClientMetrics>,
4053 1681 : ) -> Result<O, E> {
4054 1681 : let start = Instant::now();
4055 1681 : let res = self.await;
4056 1667 : let duration = start.elapsed();
4057 1667 : let status = if res.is_ok() { &"success" } else { &"failure" };
4058 1667 : metrics
4059 1667 : .remote_operation_time(task_kind, &file_kind, &op, status)
4060 1667 : .observe(duration.as_secs_f64());
4061 1667 : res
4062 1667 : }
4063 : }
4064 :
4065 : impl<Fut, O, E> MeasureRemoteOp<O, E> for Fut where Fut: Sized + Future<Output = Result<O, E>> {}
4066 :
4067 : pub mod tokio_epoll_uring {
4068 : use std::collections::HashMap;
4069 : use std::sync::{Arc, Mutex};
4070 :
4071 : use metrics::{Histogram, LocalHistogram, UIntGauge, register_histogram, register_int_counter};
4072 : use once_cell::sync::Lazy;
4073 :
4074 : /// Shared storage for tokio-epoll-uring thread local metrics.
4075 : pub(crate) static THREAD_LOCAL_METRICS_STORAGE: Lazy<ThreadLocalMetricsStorage> =
4076 122 : Lazy::new(|| {
4077 122 : let slots_submission_queue_depth = register_histogram!(
4078 : "pageserver_tokio_epoll_uring_slots_submission_queue_depth",
4079 : "The slots waiters queue depth of each tokio_epoll_uring system",
4080 122 : vec![
4081 : 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0
4082 : ],
4083 : )
4084 122 : .expect("failed to define a metric");
4085 122 : ThreadLocalMetricsStorage {
4086 122 : observers: Mutex::new(HashMap::new()),
4087 122 : slots_submission_queue_depth,
4088 122 : }
4089 122 : });
4090 :
4091 : pub struct ThreadLocalMetricsStorage {
4092 : /// List of thread local metrics observers.
4093 : observers: Mutex<HashMap<u64, Arc<ThreadLocalMetrics>>>,
4094 : /// A histogram shared between all thread local systems
4095 : /// for collecting slots submission queue depth.
4096 : slots_submission_queue_depth: Histogram,
4097 : }
4098 :
4099 : /// Each thread-local [`tokio_epoll_uring::System`] gets one of these as its
4100 : /// [`tokio_epoll_uring::metrics::PerSystemMetrics`] generic.
4101 : ///
4102 : /// The System makes observations into [`Self`] and periodically, the collector
4103 : /// comes along and flushes [`Self`] into the shared storage [`THREAD_LOCAL_METRICS_STORAGE`].
4104 : ///
4105 : /// [`LocalHistogram`] is `!Send`, so we need to put it behind a [`Mutex`].
4106 : /// But except for the periodic flush, the lock is uncontended, so there's no waiting
4107 : /// for the cache coherence protocol to get an exclusive cache line.
4108 : pub struct ThreadLocalMetrics {
4109 : /// Local observer of thread local tokio-epoll-uring system's slots waiters queue depth.
4110 : slots_submission_queue_depth: Mutex<LocalHistogram>,
4111 : }
4112 :
4113 : impl ThreadLocalMetricsStorage {
4114 : /// Registers a new thread local system. Returns a thread local metrics observer.
4115 548 : pub fn register_system(&self, id: u64) -> Arc<ThreadLocalMetrics> {
4116 548 : let per_system_metrics = Arc::new(ThreadLocalMetrics::new(
4117 548 : self.slots_submission_queue_depth.local(),
4118 : ));
4119 548 : let mut g = self.observers.lock().unwrap();
4120 548 : g.insert(id, Arc::clone(&per_system_metrics));
4121 548 : per_system_metrics
4122 548 : }
4123 :
4124 : /// Removes metrics observer for a thread local system.
4125 : /// This should be called before dropping a thread local system.
4126 122 : pub fn remove_system(&self, id: u64) {
4127 122 : let mut g = self.observers.lock().unwrap();
4128 122 : g.remove(&id);
4129 122 : }
4130 :
4131 : /// Flush all thread local metrics to the shared storage.
4132 0 : pub fn flush_thread_local_metrics(&self) {
4133 0 : let g = self.observers.lock().unwrap();
4134 0 : g.values().for_each(|local| {
4135 0 : local.flush();
4136 0 : });
4137 0 : }
4138 : }
4139 :
4140 : impl ThreadLocalMetrics {
4141 548 : pub fn new(slots_submission_queue_depth: LocalHistogram) -> Self {
4142 548 : ThreadLocalMetrics {
4143 548 : slots_submission_queue_depth: Mutex::new(slots_submission_queue_depth),
4144 548 : }
4145 548 : }
4146 :
4147 : /// Flushes the thread local metrics to shared aggregator.
4148 0 : pub fn flush(&self) {
4149 : let Self {
4150 0 : slots_submission_queue_depth,
4151 0 : } = self;
4152 0 : slots_submission_queue_depth.lock().unwrap().flush();
4153 0 : }
4154 : }
4155 :
4156 : impl tokio_epoll_uring::metrics::PerSystemMetrics for ThreadLocalMetrics {
4157 399794 : fn observe_slots_submission_queue_depth(&self, queue_depth: u64) {
4158 : let Self {
4159 399794 : slots_submission_queue_depth,
4160 399794 : } = self;
4161 399794 : slots_submission_queue_depth
4162 399794 : .lock()
4163 399794 : .unwrap()
4164 399794 : .observe(queue_depth as f64);
4165 399794 : }
4166 : }
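// Lifecycle sketch (editor's addition): each thread-local tokio-epoll-uring
// system registers an observer, makes cheap thread-local observations on the
// hot path, and the collector periodically folds them into the shared histogram:
//
//     let local = THREAD_LOCAL_METRICS_STORAGE.register_system(system_id);
//     local.observe_slots_submission_queue_depth(depth);         // hot path
//     THREAD_LOCAL_METRICS_STORAGE.flush_thread_local_metrics(); // from Collector::collect
//     THREAD_LOCAL_METRICS_STORAGE.remove_system(system_id);     // before the system is dropped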
4167 :
4168 : pub struct Collector {
4169 : descs: Vec<metrics::core::Desc>,
4170 : systems_created: UIntGauge,
4171 : systems_destroyed: UIntGauge,
4172 : thread_local_metrics_storage: &'static ThreadLocalMetricsStorage,
4173 : }
4174 :
4175 : impl metrics::core::Collector for Collector {
4176 0 : fn desc(&self) -> Vec<&metrics::core::Desc> {
4177 0 : self.descs.iter().collect()
4178 0 : }
4179 :
4180 0 : fn collect(&self) -> Vec<metrics::proto::MetricFamily> {
4181 0 : let mut mfs = Vec::with_capacity(Self::NMETRICS);
4182 : let tokio_epoll_uring::metrics::GlobalMetrics {
4183 0 : systems_created,
4184 0 : systems_destroyed,
4185 0 : } = tokio_epoll_uring::metrics::global();
4186 0 : self.systems_created.set(systems_created);
4187 0 : mfs.extend(self.systems_created.collect());
4188 0 : self.systems_destroyed.set(systems_destroyed);
4189 0 : mfs.extend(self.systems_destroyed.collect());
4190 :
4191 0 : self.thread_local_metrics_storage
4192 0 : .flush_thread_local_metrics();
4193 :
4194 0 : mfs.extend(
4195 0 : self.thread_local_metrics_storage
4196 0 : .slots_submission_queue_depth
4197 0 : .collect(),
4198 : );
4199 0 : mfs
4200 0 : }
4201 : }
4202 :
4203 : impl Collector {
4204 : const NMETRICS: usize = 3;
4205 :
4206 : #[allow(clippy::new_without_default)]
4207 0 : pub fn new() -> Self {
4208 0 : let mut descs = Vec::new();
4209 :
4210 0 : let systems_created = UIntGauge::new(
4211 : "pageserver_tokio_epoll_uring_systems_created",
4212 : "counter of tokio-epoll-uring systems that were created",
4213 : )
4214 0 : .unwrap();
4215 0 : descs.extend(
4216 0 : metrics::core::Collector::desc(&systems_created)
4217 0 : .into_iter()
4218 0 : .cloned(),
4219 : );
4220 :
4221 0 : let systems_destroyed = UIntGauge::new(
4222 : "pageserver_tokio_epoll_uring_systems_destroyed",
4223 : "counter of tokio-epoll-uring systems that were destroyed",
4224 : )
4225 0 : .unwrap();
4226 0 : descs.extend(
4227 0 : metrics::core::Collector::desc(&systems_destroyed)
4228 0 : .into_iter()
4229 0 : .cloned(),
4230 : );
4231 :
4232 0 : Self {
4233 0 : descs,
4234 0 : systems_created,
4235 0 : systems_destroyed,
4236 0 : thread_local_metrics_storage: &THREAD_LOCAL_METRICS_STORAGE,
4237 0 : }
4238 0 : }
4239 : }
4240 :
4241 122 : pub(crate) static THREAD_LOCAL_LAUNCH_SUCCESSES: Lazy<metrics::IntCounter> = Lazy::new(|| {
4242 122 : register_int_counter!(
4243 : "pageserver_tokio_epoll_uring_pageserver_thread_local_launch_success_count",
4244 : "Number of times where thread_local_system creation spanned multiple executor threads",
4245 : )
4246 122 : .unwrap()
4247 122 : });
4248 :
4249 0 : pub(crate) static THREAD_LOCAL_LAUNCH_FAILURES: Lazy<metrics::IntCounter> = Lazy::new(|| {
4250 0 : register_int_counter!(
4251 : "pageserver_tokio_epoll_uring_pageserver_thread_local_launch_failures_count",
4252 : "Number of times thread_local_system creation failed and was retried after back-off.",
4253 : )
4254 0 : .unwrap()
4255 0 : });
4256 : }
4257 :
4258 : pub(crate) struct GlobalAndPerTenantIntCounter {
4259 : global: IntCounter,
4260 : per_tenant: IntCounter,
4261 : }
4262 :
4263 : impl GlobalAndPerTenantIntCounter {
4264 : #[inline(always)]
4265 0 : pub(crate) fn inc(&self) {
4266 0 : self.inc_by(1)
4267 0 : }
4268 : #[inline(always)]
4269 112980 : pub(crate) fn inc_by(&self, n: u64) {
4270 112980 : self.global.inc_by(n);
4271 112980 : self.per_tenant.inc_by(n);
4272 112980 : }
4273 : }
4274 :
4275 : pub(crate) mod tenant_throttling {
4276 : use metrics::register_int_counter_vec;
4277 : use once_cell::sync::Lazy;
4278 : use utils::shard::TenantShardId;
4279 :
4280 : use super::GlobalAndPerTenantIntCounter;
4281 :
4282 : pub(crate) struct Metrics<const KIND: usize> {
4283 : pub(super) count_accounted_start: GlobalAndPerTenantIntCounter,
4284 : pub(super) count_accounted_finish: GlobalAndPerTenantIntCounter,
4285 : pub(super) wait_time: GlobalAndPerTenantIntCounter,
4286 : pub(super) count_throttled: GlobalAndPerTenantIntCounter,
4287 : }
4288 :
4289 110 : static COUNT_ACCOUNTED_START: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
4290 110 : register_int_counter_vec!(
4291 : "pageserver_tenant_throttling_count_accounted_start_global",
4292 : "Count of tenant throttling starts, by kind of throttle.",
4293 110 : &["kind"]
4294 : )
4295 110 : .unwrap()
4296 110 : });
4297 110 : static COUNT_ACCOUNTED_START_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
4298 110 : register_int_counter_vec!(
4299 : "pageserver_tenant_throttling_count_accounted_start",
4300 : "Count of tenant throttling starts, by kind of throttle.",
4301 110 : &["kind", "tenant_id", "shard_id"]
4302 : )
4303 110 : .unwrap()
4304 110 : });
4305 110 : static COUNT_ACCOUNTED_FINISH: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
4306 110 : register_int_counter_vec!(
4307 : "pageserver_tenant_throttling_count_accounted_finish_global",
4308 : "Count of tenant throttling finishes, by kind of throttle.",
4309 110 : &["kind"]
4310 : )
4311 110 : .unwrap()
4312 110 : });
4313 110 : static COUNT_ACCOUNTED_FINISH_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
4314 110 : register_int_counter_vec!(
4315 : "pageserver_tenant_throttling_count_accounted_finish",
4316 : "Count of tenant throttling finishes, by kind of throttle.",
4317 110 : &["kind", "tenant_id", "shard_id"]
4318 : )
4319 110 : .unwrap()
4320 110 : });
4321 110 : static WAIT_USECS: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
4322 110 : register_int_counter_vec!(
4323 : "pageserver_tenant_throttling_wait_usecs_sum_global",
4324 : "Sum of microseconds that spent waiting throttle by kind of throttle.",
4325 110 : &["kind"]
4326 : )
4327 110 : .unwrap()
4328 110 : });
4329 110 : static WAIT_USECS_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
4330 110 : register_int_counter_vec!(
4331 : "pageserver_tenant_throttling_wait_usecs_sum",
4332 : "Sum of microseconds that spent waiting throttle by kind of throttle.",
4333 110 : &["kind", "tenant_id", "shard_id"]
4334 : )
4335 110 : .unwrap()
4336 110 : });
4337 :
4338 110 : static WAIT_COUNT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
4339 110 : register_int_counter_vec!(
4340 : "pageserver_tenant_throttling_count_global",
4341 : "Count of tenant throttlings, by kind of throttle.",
4342 110 : &["kind"]
4343 : )
4344 110 : .unwrap()
4345 110 : });
4346 110 : static WAIT_COUNT_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
4347 110 : register_int_counter_vec!(
4348 : "pageserver_tenant_throttling_count",
4349 : "Count of tenant throttlings, by kind of throttle.",
4350 110 : &["kind", "tenant_id", "shard_id"]
4351 : )
4352 110 : .unwrap()
4353 110 : });
4354 :
4355 : const KINDS: &[&str] = &["pagestream"];
4356 : pub type Pagestream = Metrics<0>;
4357 :
4358 : impl<const KIND: usize> Metrics<KIND> {
4359 119 : pub(crate) fn new(tenant_shard_id: &TenantShardId) -> Self {
4360 119 : let per_tenant_label_values = &[
4361 119 : KINDS[KIND],
4362 119 : &tenant_shard_id.tenant_id.to_string(),
4363 119 : &tenant_shard_id.shard_slug().to_string(),
4364 119 : ];
4365 119 : Metrics {
4366 119 : count_accounted_start: {
4367 119 : GlobalAndPerTenantIntCounter {
4368 119 : global: COUNT_ACCOUNTED_START.with_label_values(&[KINDS[KIND]]),
4369 119 : per_tenant: COUNT_ACCOUNTED_START_PER_TENANT
4370 119 : .with_label_values(per_tenant_label_values),
4371 119 : }
4372 119 : },
4373 119 : count_accounted_finish: {
4374 119 : GlobalAndPerTenantIntCounter {
4375 119 : global: COUNT_ACCOUNTED_FINISH.with_label_values(&[KINDS[KIND]]),
4376 119 : per_tenant: COUNT_ACCOUNTED_FINISH_PER_TENANT
4377 119 : .with_label_values(per_tenant_label_values),
4378 119 : }
4379 119 : },
4380 119 : wait_time: {
4381 119 : GlobalAndPerTenantIntCounter {
4382 119 : global: WAIT_USECS.with_label_values(&[KINDS[KIND]]),
4383 119 : per_tenant: WAIT_USECS_PER_TENANT
4384 119 : .with_label_values(per_tenant_label_values),
4385 119 : }
4386 119 : },
4387 119 : count_throttled: {
4388 119 : GlobalAndPerTenantIntCounter {
4389 119 : global: WAIT_COUNT.with_label_values(&[KINDS[KIND]]),
4390 119 : per_tenant: WAIT_COUNT_PER_TENANT
4391 119 : .with_label_values(per_tenant_label_values),
4392 119 : }
4393 119 : },
4394 119 : }
4395 119 : }
4396 : }
4397 :
4398 0 : pub(crate) fn preinitialize_global_metrics() {
4399 0 : Lazy::force(&COUNT_ACCOUNTED_START);
4400 0 : Lazy::force(&COUNT_ACCOUNTED_FINISH);
4401 0 : Lazy::force(&WAIT_USECS);
4402 0 : Lazy::force(&WAIT_COUNT);
4403 0 : }
4404 :
4405 3 : pub(crate) fn remove_tenant_metrics(tenant_shard_id: &TenantShardId) {
4406 12 : for m in &[
4407 3 : &COUNT_ACCOUNTED_START_PER_TENANT,
4408 3 : &COUNT_ACCOUNTED_FINISH_PER_TENANT,
4409 3 : &WAIT_USECS_PER_TENANT,
4410 3 : &WAIT_COUNT_PER_TENANT,
4411 3 : ] {
4412 24 : for kind in KINDS {
4413 12 : let _ = m.remove_label_values(&[
4414 12 : kind,
4415 12 : &tenant_shard_id.tenant_id.to_string(),
4416 12 : &tenant_shard_id.shard_slug().to_string(),
4417 12 : ]);
4418 12 : }
4419 : }
4420 3 : }
4421 : }
4422 :
4423 : pub(crate) mod disk_usage_based_eviction {
4424 : use super::*;
4425 :
4426 : pub(crate) struct Metrics {
4427 : pub(crate) tenant_collection_time: Histogram,
4428 : pub(crate) tenant_layer_count: Histogram,
4429 : pub(crate) layers_collected: IntCounter,
4430 : pub(crate) layers_selected: IntCounter,
4431 : pub(crate) layers_evicted: IntCounter,
4432 : /*BEGIN_HADRON */
4433 : pub(crate) bytes_evicted: IntCounter,
4434 : /*END_HADRON */
4435 : }
4436 :
4437 : impl Default for Metrics {
4438 0 : fn default() -> Self {
4439 0 : let tenant_collection_time = register_histogram!(
4440 : "pageserver_disk_usage_based_eviction_tenant_collection_seconds",
4441 : "Time spent collecting layers from a tenant -- not normalized by collected layer amount",
4442 0 : vec![0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1.0, 5.0, 10.0]
4443 : )
4444 0 : .unwrap();
4445 :
4446 0 : let tenant_layer_count = register_histogram!(
4447 : "pageserver_disk_usage_based_eviction_tenant_collected_layers",
4448 : "Number of layers gathered from a tenant",
4449 0 : vec![5.0, 50.0, 500.0, 5000.0, 50000.0]
4450 : )
4451 0 : .unwrap();
4452 :
4453 0 : let layers_collected = register_int_counter!(
4454 : "pageserver_disk_usage_based_eviction_collected_layers_total",
4455 : "Number of layers collected"
4456 : )
4457 0 : .unwrap();
4458 :
4459 0 : let layers_selected = register_int_counter!(
4460 : "pageserver_disk_usage_based_eviction_select_layers_total",
4461 : "Number of layers selected"
4462 : )
4463 0 : .unwrap();
4464 :
4465 0 : let layers_evicted = register_int_counter!(
4466 : "pageserver_disk_usage_based_eviction_evicted_layers_total",
4467 : "Number of layers successfully evicted"
4468 : )
4469 0 : .unwrap();
4470 :
4471 : /* BEGIN_HADRON */
4472 0 : let bytes_evicted = register_int_counter!(
4473 : "pageserver_disk_usage_based_eviction_evicted_bytes_total",
4474 : "Number of bytes successfully evicted"
4475 : )
4476 0 : .unwrap();
4477 : /* END_HADRON */
4478 :
4479 0 : Self {
4480 0 : tenant_collection_time,
4481 0 : tenant_layer_count,
4482 0 : layers_collected,
4483 0 : layers_selected,
4484 0 : layers_evicted,
4485 0 : bytes_evicted,
4486 0 : }
4487 0 : }
4488 : }
4489 :
4490 : pub(crate) static METRICS: Lazy<Metrics> = Lazy::new(Metrics::default);
4491 : }
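// Editorial sketch: how an eviction pass might feed these metrics. `observe`
// and `inc_by` are the standard prometheus Histogram/IntCounter methods; the
// function and its parameters are invented for illustration.
//
//     fn record_collection_pass(layer_count: usize, started: std::time::Instant) {
//         let m = &disk_usage_based_eviction::METRICS;
//         m.tenant_collection_time.observe(started.elapsed().as_secs_f64());
//         m.tenant_layer_count.observe(layer_count as f64);
//         m.layers_collected.inc_by(layer_count as u64);
//     }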
4492 :
4493 107 : static TOKIO_EXECUTOR_THREAD_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
4494 107 : register_uint_gauge_vec!(
4495 : "pageserver_tokio_executor_thread_configured_count",
4496 : "Total number of configured tokio executor threads in the process.
4497 : The `setup` label denotes whether we're running with multiple runtimes or a single runtime.",
4498 107 : &["setup"],
4499 : )
4500 107 : .unwrap()
4501 107 : });
4502 :
4503 107 : pub(crate) fn set_tokio_runtime_setup(setup: &str, num_threads: NonZeroUsize) {
4504 : static SERIALIZE: std::sync::Mutex<()> = std::sync::Mutex::new(());
4505 107 : let _guard = SERIALIZE.lock().unwrap();
4506 107 : TOKIO_EXECUTOR_THREAD_COUNT.reset();
4507 107 : TOKIO_EXECUTOR_THREAD_COUNT
4508 107 : .get_metric_with_label_values(&[setup])
4509 107 : .unwrap()
4510 107 : .set(u64::try_from(num_threads.get()).unwrap());
4511 107 : }
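// Editorial sketch: intended to be called once at startup, after the runtime
// layout is decided. The label value below is illustrative; the file does not
// enumerate the allowed `setup` strings.
//
//     set_tokio_runtime_setup(
//         "multiple-runtimes",
//         std::num::NonZeroUsize::new(8).expect("thread count must be nonzero"),
//     );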
4512 :
4513 110 : pub(crate) static BASEBACKUP_CACHE_READ: Lazy<IntCounterVec> = Lazy::new(|| {
4514 110 : register_int_counter_vec!(
4515 : "pageserver_basebackup_cache_read_total",
4516 : "Number of read accesses to the basebackup cache grouped by hit/miss/error",
4517 110 : &["result"]
4518 : )
4519 110 : .expect("failed to define a metric")
4520 110 : });
4521 :
4522 110 : pub(crate) static BASEBACKUP_CACHE_PREPARE: Lazy<IntCounterVec> = Lazy::new(|| {
4523 110 : register_int_counter_vec!(
4524 : "pageserver_basebackup_cache_prepare_total",
4525 : "Number of prepare requests processed by the basebackup cache grouped by ok/skip/error",
4526 110 : &["result"]
4527 : )
4528 110 : .expect("failed to define a metric")
4529 110 : });
4530 :
4531 0 : pub(crate) static BASEBACKUP_CACHE_ENTRIES: Lazy<UIntGauge> = Lazy::new(|| {
4532 0 : register_uint_gauge!(
4533 : "pageserver_basebackup_cache_entries_total",
4534 : "Number of entries in the basebackup cache"
4535 : )
4536 0 : .expect("failed to define a metric")
4537 0 : });
4538 :
4539 0 : pub(crate) static BASEBACKUP_CACHE_SIZE: Lazy<UIntGauge> = Lazy::new(|| {
4540 0 : register_uint_gauge!(
4541 : "pageserver_basebackup_cache_size_bytes",
4542 : "Total size of all basebackup cache entries on disk in bytes"
4543 : )
4544 0 : .expect("failed to define a metric")
4545 0 : });
4546 :
4547 0 : pub(crate) static BASEBACKUP_CACHE_PREPARE_QUEUE_SIZE: Lazy<UIntGauge> = Lazy::new(|| {
4548 0 : register_uint_gauge!(
4549 : "pageserver_basebackup_cache_prepare_queue_size",
4550 : "Number of requests in the basebackup prepare channel"
4551 : )
4552 0 : .expect("failed to define a metric")
4553 0 : });
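// Editorial sketch: the two counter vecs take a single `result` label whose
// values follow the help strings above ("hit"/"miss"/"error" for reads,
// "ok"/"skip"/"error" for prepares); the gauges hold current totals. The
// `entry_count` and `total_bytes` variables are hypothetical.
//
//     BASEBACKUP_CACHE_READ.with_label_values(&["hit"]).inc();
//     BASEBACKUP_CACHE_PREPARE.with_label_values(&["skip"]).inc();
//     BASEBACKUP_CACHE_ENTRIES.set(entry_count);
//     BASEBACKUP_CACHE_SIZE.set(total_bytes);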
4554 :
4555 0 : static PAGESERVER_CONFIG_IGNORED_ITEMS: Lazy<UIntGaugeVec> = Lazy::new(|| {
4556 0 : register_uint_gauge_vec!(
4557 : "pageserver_config_ignored_items",
4558 : "TOML items present in the on-disk configuration file but ignored by the pageserver config parser. \
4559 : The `item` label is the dot-separated path of the ignored item in the on-disk configuration file. \
4560 : The value for an unknown config item is always 1. \
4561 : There is a special label value \"\", which is 0, so that there is always a metric exposed (simplifies dashboards).",
4562 0 : &["item"]
4563 : )
4564 0 : .unwrap()
4565 0 : });
4566 :
4567 0 : pub fn preinitialize_metrics(
4568 0 : conf: &'static PageServerConf,
4569 0 : ignored: config::ignored_fields::Paths,
4570 0 : ) {
4571 0 : set_page_service_config_max_batch_size(&conf.page_service_pipelining);
4572 :
4573 0 : PAGESERVER_CONFIG_IGNORED_ITEMS
4574 0 : .with_label_values(&[""])
4575 0 : .set(0);
4576 0 : for path in &ignored.paths {
4577 0 : PAGESERVER_CONFIG_IGNORED_ITEMS
4578 0 : .with_label_values(&[path])
4579 0 : .set(1);
4580 0 : }
4581 :
4582 : // Python tests need these, and we alert on some of them.
4583 : //
4584 : // FIXME(4813): make it so that we have no top-level metrics, as this fn easily falls out
4585 : // of sync:
4586 : // - global metrics reside in a Lazy<PageserverMetrics>
4587 : // - access via crate::metrics::PS_METRICS.some_metric.inc()
4588 : // - could move the statics into TimelineMetrics::new()?
4589 :
4590 : // counters
4591 0 : [
4592 0 : &UNEXPECTED_ONDEMAND_DOWNLOADS,
4593 0 : &WALRECEIVER_STARTED_CONNECTIONS,
4594 0 : &WALRECEIVER_BROKER_UPDATES,
4595 0 : &WALRECEIVER_CANDIDATES_ADDED,
4596 0 : &WALRECEIVER_CANDIDATES_REMOVED,
4597 0 : &tokio_epoll_uring::THREAD_LOCAL_LAUNCH_FAILURES,
4598 0 : &tokio_epoll_uring::THREAD_LOCAL_LAUNCH_SUCCESSES,
4599 0 : &REMOTE_ONDEMAND_DOWNLOADED_LAYERS,
4600 0 : &REMOTE_ONDEMAND_DOWNLOADED_BYTES,
4601 0 : &CIRCUIT_BREAKERS_BROKEN,
4602 0 : &CIRCUIT_BREAKERS_UNBROKEN,
4603 0 : &PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS_GLOBAL,
4604 0 : &WAIT_LSN_IN_PROGRESS_GLOBAL_MICROS,
4605 0 : &MISROUTED_PAGESTREAM_REQUESTS,
4606 0 : ]
4607 0 : .into_iter()
4608 0 : .for_each(|c| {
4609 0 : Lazy::force(c);
4610 0 : });
4611 :
4612 : // Deletion queue stats
4613 0 : Lazy::force(&DELETION_QUEUE);
4614 :
4615 : // Tenant stats
4616 0 : Lazy::force(&TENANT);
4617 :
4618 : // Tenant manager stats
4619 0 : Lazy::force(&TENANT_MANAGER);
4620 :
4621 0 : Lazy::force(&crate::tenant::storage_layer::layer::LAYER_IMPL_METRICS);
4622 0 : Lazy::force(&disk_usage_based_eviction::METRICS);
4623 :
4624 0 : for state_name in pageserver_api::models::TenantState::VARIANTS {
4625 0 : // Initialize the gauge for every state; otherwise the time series might appear to
4626 0 : // carry over values from before the last restart.
4627 0 : TENANT_STATE_METRIC.with_label_values(&[state_name]).set(0);
4628 0 : }
4629 :
4630 : // countervecs
4631 0 : [
4632 0 : &BACKGROUND_LOOP_PERIOD_OVERRUN_COUNT,
4633 0 : &SMGR_QUERY_STARTED_GLOBAL,
4634 0 : &PAGE_SERVICE_BATCH_BREAK_REASON_GLOBAL,
4635 0 : ]
4636 0 : .into_iter()
4637 0 : .for_each(|c| {
4638 0 : Lazy::force(c);
4639 0 : });
4640 :
4641 : // gauges
4642 0 : WALRECEIVER_ACTIVE_MANAGERS.get();
4643 0 : LOCAL_DATA_LOSS_SUSPECTED.get();
4644 :
4645 : // histograms
4646 0 : [
4647 0 : &LAYERS_PER_READ_GLOBAL,
4648 0 : &LAYERS_PER_READ_BATCH_GLOBAL,
4649 0 : &LAYERS_PER_READ_AMORTIZED_GLOBAL,
4650 0 : &DELTAS_PER_READ_GLOBAL,
4651 0 : &WAIT_LSN_TIME,
4652 0 : &WAL_REDO_TIME,
4653 0 : &WAL_REDO_RECORDS_HISTOGRAM,
4654 0 : &WAL_REDO_BYTES_HISTOGRAM,
4655 0 : &WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM,
4656 0 : &PAGE_SERVICE_BATCH_SIZE_GLOBAL,
4657 0 : &PAGE_SERVICE_SMGR_BATCH_WAIT_TIME_GLOBAL,
4658 0 : ]
4659 0 : .into_iter()
4660 0 : .for_each(|h| {
4661 0 : Lazy::force(h);
4662 0 : });
4663 :
4664 : // Custom
4665 0 : Lazy::force(&BASEBACKUP_QUERY_TIME);
4666 0 : Lazy::force(&COMPUTE_COMMANDS_COUNTERS);
4667 0 : Lazy::force(&tokio_epoll_uring::THREAD_LOCAL_METRICS_STORAGE);
4668 :
4669 0 : tenant_throttling::preinitialize_global_metrics();
4670 0 : wait_ondemand_download_time::preinitialize_global_metrics();
4671 0 : }
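// Editorial sketch: run once during pageserver startup, after config parsing,
// so every global series exists before the first scrape. `conf` and `ignored`
// here come from the config loader.
//
//     crate::metrics::preinitialize_metrics(conf, ignored);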
|