Line data Source code
1 : use enum_map::EnumMap;
2 : use metrics::{
3 : register_counter_vec, register_gauge_vec, register_histogram, register_histogram_vec,
4 : register_int_counter, register_int_counter_pair_vec, register_int_counter_vec,
5 : register_int_gauge, register_int_gauge_vec, register_uint_gauge, register_uint_gauge_vec,
6 : Counter, CounterVec, GaugeVec, Histogram, HistogramVec, IntCounter, IntCounterPair,
7 : IntCounterPairVec, IntCounterVec, IntGauge, IntGaugeVec, UIntGauge, UIntGaugeVec,
8 : };
9 : use once_cell::sync::Lazy;
10 : use pageserver_api::shard::TenantShardId;
11 : use strum::{EnumCount, VariantNames};
12 : use strum_macros::{EnumVariantNames, IntoStaticStr};
13 : use tracing::warn;
14 : use utils::id::TimelineId;
15 :
16 : /// Prometheus histogram buckets (in seconds) for operations in the critical
17 : /// path. In other words, operations that directly affect the latency of user
18 : /// queries.
19 : ///
20 : /// The buckets capture the majority of latencies in the microsecond and
21 : /// millisecond range but also extend far enough up to distinguish "bad" from
22 : /// "really bad".
23 : const CRITICAL_OP_BUCKETS: &[f64] = &[
24 : 0.000_001, 0.000_010, 0.000_100, // 1 us, 10 us, 100 us
25 : 0.001_000, 0.010_000, 0.100_000, // 1 ms, 10 ms, 100 ms
26 : 1.0, 10.0, 100.0, // 1 s, 10 s, 100 s
27 : ];
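
// A sketch of how a histogram registered with these buckets gets observations
// (WAIT_LSN_TIME further down uses exactly these buckets; the local variable
// names here are illustrative, not actual call sites):
//
//     let started = std::time::Instant::now();
//     // ... critical-path work, e.g. waiting for WAL to arrive ...
//     WAIT_LSN_TIME.observe(started.elapsed().as_secs_f64());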
28 :
29 : // Metrics collected on operations on the storage repository.
30 3120 : #[derive(Debug, EnumVariantNames, IntoStaticStr)]
31 : #[strum(serialize_all = "kebab_case")]
32 : pub(crate) enum StorageTimeOperation {
33 : #[strum(serialize = "layer flush")]
34 : LayerFlush,
35 :
36 : #[strum(serialize = "compact")]
37 : Compact,
38 :
39 : #[strum(serialize = "create images")]
40 : CreateImages,
41 :
42 : #[strum(serialize = "logical size")]
43 : LogicalSize,
44 :
45 : #[strum(serialize = "imitate logical size")]
46 : ImitateLogicalSize,
47 :
48 : #[strum(serialize = "load layer map")]
49 : LoadLayerMap,
50 :
51 : #[strum(serialize = "gc")]
52 : Gc,
53 :
54 : #[strum(serialize = "find gc cutoffs")]
55 : FindGcCutoffs,
56 : }
57 :
58 148 : pub(crate) static STORAGE_TIME_SUM_PER_TIMELINE: Lazy<CounterVec> = Lazy::new(|| {
59 : register_counter_vec!(
60 : "pageserver_storage_operations_seconds_sum",
61 : "Total time spent on storage operations with operation, tenant and timeline dimensions",
62 : &["operation", "tenant_id", "shard_id", "timeline_id"],
63 : )
64 148 : .expect("failed to define a metric")
65 148 : });
66 :
67 148 : pub(crate) static STORAGE_TIME_COUNT_PER_TIMELINE: Lazy<IntCounterVec> = Lazy::new(|| {
68 : register_int_counter_vec!(
69 : "pageserver_storage_operations_seconds_count",
70 : "Count of storage operations with operation, tenant and timeline dimensions",
71 : &["operation", "tenant_id", "shard_id", "timeline_id"],
72 : )
73 148 : .expect("failed to define a metric")
74 148 : });
75 :
76 : // Buckets for background operations like compaction, GC, size calculation
77 : const STORAGE_OP_BUCKETS: &[f64] = &[0.010, 0.100, 1.0, 10.0, 100.0, 1000.0];
78 :
79 148 : pub(crate) static STORAGE_TIME_GLOBAL: Lazy<HistogramVec> = Lazy::new(|| {
80 : register_histogram_vec!(
81 : "pageserver_storage_operations_seconds_global",
82 : "Time spent on storage operations",
83 : &["operation"],
84 : STORAGE_OP_BUCKETS.into(),
85 : )
86 148 : .expect("failed to define a metric")
87 148 : });
88 :
89 146 : pub(crate) static READ_NUM_LAYERS_VISITED: Lazy<Histogram> = Lazy::new(|| {
90 : register_histogram!(
91 : "pageserver_layers_visited_per_read_global",
92 : "Number of layers visited to reconstruct one key",
93 : vec![1.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0],
94 : )
95 146 : .expect("failed to define a metric")
96 146 : });
97 :
98 24 : pub(crate) static VEC_READ_NUM_LAYERS_VISITED: Lazy<Histogram> = Lazy::new(|| {
99 : register_histogram!(
100 : "pageserver_layers_visited_per_vectored_read_global",
101 : "Average number of layers visited to reconstruct one key",
102 : vec![1.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0],
103 : )
104 24 : .expect("failed to define a metric")
105 24 : });
106 :
107 : // The kind of get operation, used as the `get_kind` label dimension on read-path metrics.
108 : #[derive(
109 584 : Clone, Copy, enum_map::Enum, strum_macros::EnumString, strum_macros::Display, IntoStaticStr,
110 : )]
111 : pub(crate) enum GetKind {
112 : Singular,
113 : Vectored,
114 : }
115 :
116 : pub(crate) struct ReconstructTimeMetrics {
117 : singular: Histogram,
118 : vectored: Histogram,
119 : }
120 :
121 146 : pub(crate) static RECONSTRUCT_TIME: Lazy<ReconstructTimeMetrics> = Lazy::new(|| {
122 146 : let inner = register_histogram_vec!(
123 : "pageserver_getpage_reconstruct_seconds",
124 : "Time spent in reconstruct_value (reconstruct a page from deltas)",
125 : &["get_kind"],
126 : CRITICAL_OP_BUCKETS.into(),
127 : )
128 146 : .expect("failed to define a metric");
129 146 :
130 146 : ReconstructTimeMetrics {
131 146 : singular: inner.with_label_values(&[GetKind::Singular.into()]),
132 146 : vectored: inner.with_label_values(&[GetKind::Vectored.into()]),
133 146 : }
134 146 : });
135 :
136 : impl ReconstructTimeMetrics {
137 625998 : pub(crate) fn for_get_kind(&self, get_kind: GetKind) -> &Histogram {
138 625998 : match get_kind {
139 625928 : GetKind::Singular => &self.singular,
140 70 : GetKind::Vectored => &self.vectored,
141 : }
142 625998 : }
143 : }
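
// Usage sketch for RECONSTRUCT_TIME (the real call sites live in the timeline
// read path, outside this file; the variable names are assumptions):
//
//     let started = std::time::Instant::now();
//     // ... reconstruct the page image from deltas ...
//     RECONSTRUCT_TIME
//         .for_get_kind(GetKind::Singular)
//         .observe(started.elapsed().as_secs_f64());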
144 :
145 : pub(crate) struct ReconstructDataTimeMetrics {
146 : singular: Histogram,
147 : vectored: Histogram,
148 : }
149 :
150 : impl ReconstructDataTimeMetrics {
151 626138 : pub(crate) fn for_get_kind(&self, get_kind: GetKind) -> &Histogram {
152 626138 : match get_kind {
153 626068 : GetKind::Singular => &self.singular,
154 70 : GetKind::Vectored => &self.vectored,
155 : }
156 626138 : }
157 : }
158 :
159 146 : pub(crate) static GET_RECONSTRUCT_DATA_TIME: Lazy<ReconstructDataTimeMetrics> = Lazy::new(|| {
160 146 : let inner = register_histogram_vec!(
161 : "pageserver_getpage_get_reconstruct_data_seconds",
162 : "Time spent in get_reconstruct_value_data",
163 : &["get_kind"],
164 : CRITICAL_OP_BUCKETS.into(),
165 : )
166 146 : .expect("failed to define a metric");
167 146 :
168 146 : ReconstructDataTimeMetrics {
169 146 : singular: inner.with_label_values(&[GetKind::Singular.into()]),
170 146 : vectored: inner.with_label_values(&[GetKind::Vectored.into()]),
171 146 : }
172 146 : });
173 :
174 : pub(crate) struct GetVectoredLatency {
175 : map: EnumMap<TaskKind, Option<Histogram>>,
176 : }
177 :
178 : #[allow(dead_code)]
179 : pub(crate) struct ScanLatency {
180 : map: EnumMap<TaskKind, Option<Histogram>>,
181 : }
182 :
183 : impl GetVectoredLatency {
184 : // Only these task types perform vectored gets. Filter all other tasks out to reduce total
185 : // cardinality of the metric.
186 : const TRACKED_TASK_KINDS: [TaskKind; 2] = [TaskKind::Compaction, TaskKind::PageRequestHandler];
187 :
188 1000 : pub(crate) fn for_task_kind(&self, task_kind: TaskKind) -> Option<&Histogram> {
189 1000 : self.map[task_kind].as_ref()
190 1000 : }
191 : }
192 :
193 : impl ScanLatency {
194 : // Only these task types perform scans. Filter all other tasks out to reduce total
195 : // cardinality of the metric.
196 : const TRACKED_TASK_KINDS: [TaskKind; 1] = [TaskKind::PageRequestHandler];
197 :
198 12 : pub(crate) fn for_task_kind(&self, task_kind: TaskKind) -> Option<&Histogram> {
199 12 : self.map[task_kind].as_ref()
200 12 : }
201 : }
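
// Both structs above hold one `Option<Histogram>` per `TaskKind`, so untracked
// task kinds add no label cardinality and cost only an array lookup. A usage
// sketch (hypothetical call site; `task_kind` and `elapsed` assumed in scope):
//
//     if let Some(histogram) = GET_VECTORED_LATENCY.for_task_kind(task_kind) {
//         histogram.observe(elapsed.as_secs_f64());
//     }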
202 :
203 : pub(crate) struct ScanLatencyOngoingRecording<'a> {
204 : parent: &'a Histogram,
205 : start: std::time::Instant,
206 : }
207 :
208 : impl<'a> ScanLatencyOngoingRecording<'a> {
209 0 : pub(crate) fn start_recording(parent: &'a Histogram) -> ScanLatencyOngoingRecording<'a> {
210 0 : let start = Instant::now();
211 0 : ScanLatencyOngoingRecording { parent, start }
212 0 : }
213 :
214 0 : pub(crate) fn observe(self, throttled: Option<Duration>) {
215 0 : let elapsed = self.start.elapsed();
216 0 : let ex_throttled = if let Some(throttled) = throttled {
217 0 : elapsed.checked_sub(throttled)
218 : } else {
219 0 : Some(elapsed)
220 : };
221 0 : if let Some(ex_throttled) = ex_throttled {
222 0 : self.parent.observe(ex_throttled.as_secs_f64());
223 0 : } else {
224 0 : use utils::rate_limit::RateLimit;
225 0 : static LOGGED: Lazy<Mutex<RateLimit>> =
226 0 : Lazy::new(|| Mutex::new(RateLimit::new(Duration::from_secs(10))));
227 0 : let mut rate_limit = LOGGED.lock().unwrap();
228 0 : rate_limit.call(|| {
229 0 : warn!("error deducting time spent throttled; this message is logged at a global rate limit");
230 0 : });
231 0 : }
232 0 : }
233 : }
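
// Sketch of the recording pattern above: the throttled duration, if known, is
// subtracted before observing (hypothetical call site; `hist` and `throttled`
// are assumed to be in scope):
//
//     let recording = ScanLatencyOngoingRecording::start_recording(hist);
//     // ... perform the scan ...
//     recording.observe(throttled);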
234 :
235 140 : pub(crate) static GET_VECTORED_LATENCY: Lazy<GetVectoredLatency> = Lazy::new(|| {
236 140 : let inner = register_histogram_vec!(
237 : "pageserver_get_vectored_seconds",
238 : "Time spent in get_vectored, excluding time spent in timeline_get_throttle.",
239 : &["task_kind"],
240 : CRITICAL_OP_BUCKETS.into(),
241 : )
242 140 : .expect("failed to define a metric");
243 140 :
244 140 : GetVectoredLatency {
245 4200 : map: EnumMap::from_array(std::array::from_fn(|task_kind_idx| {
246 4200 : let task_kind = <TaskKind as enum_map::Enum>::from_usize(task_kind_idx);
247 4200 :
248 4200 : if GetVectoredLatency::TRACKED_TASK_KINDS.contains(&task_kind) {
249 280 : let task_kind = task_kind.into();
250 280 : Some(inner.with_label_values(&[task_kind]))
251 : } else {
252 3920 : None
253 : }
254 4200 : })),
255 140 : }
256 140 : });
257 :
258 4 : pub(crate) static SCAN_LATENCY: Lazy<ScanLatency> = Lazy::new(|| {
259 4 : let inner = register_histogram_vec!(
260 : "pageserver_scan_seconds",
261 : "Time spent in scan, excluding time spent in timeline_get_throttle.",
262 : &["task_kind"],
263 : CRITICAL_OP_BUCKETS.into(),
264 : )
265 4 : .expect("failed to define a metric");
266 4 :
267 4 : ScanLatency {
268 120 : map: EnumMap::from_array(std::array::from_fn(|task_kind_idx| {
269 120 : let task_kind = <TaskKind as enum_map::Enum>::from_usize(task_kind_idx);
270 120 :
271 120 : if ScanLatency::TRACKED_TASK_KINDS.contains(&task_kind) {
272 4 : let task_kind = task_kind.into();
273 4 : Some(inner.with_label_values(&[task_kind]))
274 : } else {
275 116 : None
276 : }
277 120 : })),
278 4 : }
279 4 : });
280 :
281 : pub(crate) struct PageCacheMetricsForTaskKind {
282 : pub read_accesses_immutable: IntCounter,
283 : pub read_hits_immutable: IntCounter,
284 : }
285 :
286 : pub(crate) struct PageCacheMetrics {
287 : map: EnumMap<TaskKind, EnumMap<PageContentKind, PageCacheMetricsForTaskKind>>,
288 : }
289 :
290 82 : static PAGE_CACHE_READ_HITS: Lazy<IntCounterVec> = Lazy::new(|| {
291 : register_int_counter_vec!(
292 : "pageserver_page_cache_read_hits_total",
293 : "Number of read accesses to the page cache that hit",
294 : &["task_kind", "key_kind", "content_kind", "hit_kind"]
295 : )
296 82 : .expect("failed to define a metric")
297 82 : });
298 :
299 82 : static PAGE_CACHE_READ_ACCESSES: Lazy<IntCounterVec> = Lazy::new(|| {
300 : register_int_counter_vec!(
301 : "pageserver_page_cache_read_accesses_total",
302 : "Number of read accesses to the page cache",
303 : &["task_kind", "key_kind", "content_kind"]
304 : )
305 82 : .expect("failed to define a metric")
306 82 : });
307 :
308 82 : pub(crate) static PAGE_CACHE: Lazy<PageCacheMetrics> = Lazy::new(|| PageCacheMetrics {
309 2460 : map: EnumMap::from_array(std::array::from_fn(|task_kind| {
310 2460 : let task_kind = <TaskKind as enum_map::Enum>::from_usize(task_kind);
311 2460 : let task_kind: &'static str = task_kind.into();
312 14760 : EnumMap::from_array(std::array::from_fn(|content_kind| {
313 14760 : let content_kind = <PageContentKind as enum_map::Enum>::from_usize(content_kind);
314 14760 : let content_kind: &'static str = content_kind.into();
315 14760 : PageCacheMetricsForTaskKind {
316 14760 : read_accesses_immutable: {
317 14760 : PAGE_CACHE_READ_ACCESSES
318 14760 : .get_metric_with_label_values(&[task_kind, "immutable", content_kind])
319 14760 : .unwrap()
320 14760 : },
321 14760 :
322 14760 : read_hits_immutable: {
323 14760 : PAGE_CACHE_READ_HITS
324 14760 : .get_metric_with_label_values(&[task_kind, "immutable", content_kind, "-"])
325 14760 : .unwrap()
326 14760 : },
327 14760 : }
328 14760 : }))
329 2460 : })),
330 82 : });
331 :
332 : impl PageCacheMetrics {
333 14193188 : pub(crate) fn for_ctx(&self, ctx: &RequestContext) -> &PageCacheMetricsForTaskKind {
334 14193188 : &self.map[ctx.task_kind()][ctx.page_content_kind()]
335 14193188 : }
336 : }
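
// Usage sketch for the lookup above (the page cache increments these counters
// on every read; `ctx` is a `RequestContext` and `hit` is an assumed boolean):
//
//     let metrics = PAGE_CACHE.for_ctx(ctx);
//     metrics.read_accesses_immutable.inc();
//     if hit {
//         metrics.read_hits_immutable.inc();
//     }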
337 :
338 : pub(crate) struct PageCacheSizeMetrics {
339 : pub max_bytes: UIntGauge,
340 :
341 : pub current_bytes_immutable: UIntGauge,
342 : }
343 :
344 82 : static PAGE_CACHE_SIZE_CURRENT_BYTES: Lazy<UIntGaugeVec> = Lazy::new(|| {
345 : register_uint_gauge_vec!(
346 : "pageserver_page_cache_size_current_bytes",
347 : "Current size of the page cache in bytes, by key kind",
348 : &["key_kind"]
349 : )
350 82 : .expect("failed to define a metric")
351 82 : });
352 :
353 : pub(crate) static PAGE_CACHE_SIZE: Lazy<PageCacheSizeMetrics> =
354 82 : Lazy::new(|| PageCacheSizeMetrics {
355 : max_bytes: {
356 : register_uint_gauge!(
357 : "pageserver_page_cache_size_max_bytes",
358 : "Maximum size of the page cache in bytes"
359 : )
360 82 : .expect("failed to define a metric")
361 82 : },
362 82 : current_bytes_immutable: {
363 82 : PAGE_CACHE_SIZE_CURRENT_BYTES
364 82 : .get_metric_with_label_values(&["immutable"])
365 82 : .unwrap()
366 82 : },
367 82 : });
368 :
369 : pub(crate) mod page_cache_eviction_metrics {
370 : use std::num::NonZeroUsize;
371 :
372 : use metrics::{register_int_counter_vec, IntCounter, IntCounterVec};
373 : use once_cell::sync::Lazy;
374 :
375 : #[derive(Clone, Copy)]
376 : pub(crate) enum Outcome {
377 : FoundSlotUnused { iters: NonZeroUsize },
378 : FoundSlotEvicted { iters: NonZeroUsize },
379 : ItersExceeded { iters: NonZeroUsize },
380 : }
381 :
382 82 : static ITERS_TOTAL_VEC: Lazy<IntCounterVec> = Lazy::new(|| {
383 : register_int_counter_vec!(
384 : "pageserver_page_cache_find_victim_iters_total",
385 : "Counter for the number of iterations in the find_victim loop",
386 : &["outcome"],
387 : )
388 82 : .expect("failed to define a metric")
389 82 : });
390 :
391 82 : static CALLS_VEC: Lazy<IntCounterVec> = Lazy::new(|| {
392 : register_int_counter_vec!(
393 : "pageserver_page_cache_find_victim_calls",
394 : "Incremented at the end of each find_victim() call. \
395 : Filter by outcome to get, e.g., the eviction rate.",
396 : &["outcome"]
397 : )
398 82 : .unwrap()
399 82 : });
400 :
401 166943 : pub(crate) fn observe(outcome: Outcome) {
402 166943 : macro_rules! dry {
403 166943 : ($label:literal, $iters:expr) => {{
404 166943 : static LABEL: &'static str = $label;
405 166943 : static ITERS_TOTAL: Lazy<IntCounter> =
406 166943 : Lazy::new(|| ITERS_TOTAL_VEC.with_label_values(&[LABEL]));
407 166943 : static CALLS: Lazy<IntCounter> =
408 166943 : Lazy::new(|| CALLS_VEC.with_label_values(&[LABEL]));
409 166943 : ITERS_TOTAL.inc_by(($iters.get()) as u64);
410 166943 : CALLS.inc();
411 166943 : }};
412 166943 : }
413 166943 : match outcome {
414 1944 : Outcome::FoundSlotUnused { iters } => dry!("found_empty", iters),
415 164999 : Outcome::FoundSlotEvicted { iters } => {
416 164999 : dry!("found_evicted", iters)
417 : }
418 0 : Outcome::ItersExceeded { iters } => {
419 0 : dry!("err_iters_exceeded", iters);
420 0 : super::page_cache_errors_inc(super::PageCacheErrorKind::EvictIterLimit);
421 0 : }
422 : }
423 166943 : }
424 : }
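
// Sketch of how an eviction outcome is reported from the find_victim loop
// (hypothetical call site; `iters` is a `NonZeroUsize`):
//
//     page_cache_eviction_metrics::observe(
//         page_cache_eviction_metrics::Outcome::FoundSlotEvicted { iters },
//     );
//
// Inside `observe`, the `dry!` macro caches the per-label `IntCounter` in a
// `static Lazy`, so the hot path does not repeat the label lookup.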
425 :
426 0 : static PAGE_CACHE_ERRORS: Lazy<IntCounterVec> = Lazy::new(|| {
427 : register_int_counter_vec!(
428 : "page_cache_errors_total",
429 : "Number of page cache errors, by error kind (e.g. timeouts acquiring a pinned slot)",
430 : &["error_kind"]
431 : )
432 0 : .expect("failed to define a metric")
433 0 : });
434 :
435 0 : #[derive(IntoStaticStr)]
436 : #[strum(serialize_all = "kebab_case")]
437 : pub(crate) enum PageCacheErrorKind {
438 : AcquirePinnedSlotTimeout,
439 : EvictIterLimit,
440 : }
441 :
442 0 : pub(crate) fn page_cache_errors_inc(error_kind: PageCacheErrorKind) {
443 0 : PAGE_CACHE_ERRORS
444 0 : .get_metric_with_label_values(&[error_kind.into()])
445 0 : .unwrap()
446 0 : .inc();
447 0 : }
448 :
449 16 : pub(crate) static WAIT_LSN_TIME: Lazy<Histogram> = Lazy::new(|| {
450 : register_histogram!(
451 : "pageserver_wait_lsn_seconds",
452 : "Time spent waiting for WAL to arrive",
453 : CRITICAL_OP_BUCKETS.into(),
454 : )
455 16 : .expect("failed to define a metric")
456 16 : });
457 :
458 148 : static LAST_RECORD_LSN: Lazy<IntGaugeVec> = Lazy::new(|| {
459 : register_int_gauge_vec!(
460 : "pageserver_last_record_lsn",
461 : "Last record LSN grouped by timeline",
462 : &["tenant_id", "shard_id", "timeline_id"]
463 : )
464 148 : .expect("failed to define a metric")
465 148 : });
466 :
467 148 : static PITR_HISTORY_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
468 : register_uint_gauge_vec!(
469 : "pageserver_pitr_history_size",
470 : "Data written since PITR cutoff on this timeline",
471 : &["tenant_id", "shard_id", "timeline_id"]
472 : )
473 148 : .expect("failed to define a metric")
474 148 : });
475 :
476 1592 : #[derive(strum_macros::EnumString, strum_macros::Display, strum_macros::IntoStaticStr)]
477 : #[strum(serialize_all = "kebab_case")]
478 : pub(crate) enum MetricLayerKind {
479 : Delta,
480 : Image,
481 : }
482 :
483 148 : static TIMELINE_LAYER_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
484 : register_uint_gauge_vec!(
485 : "pageserver_layer_bytes",
486 : "Sum of layer physical sizes in bytes",
487 : &["tenant_id", "shard_id", "timeline_id", "kind"]
488 : )
489 148 : .expect("failed to define a metric")
490 148 : });
491 :
492 148 : static TIMELINE_LAYER_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
493 : register_uint_gauge_vec!(
494 : "pageserver_layer_count",
495 : "Number of layers that exist",
496 : &["tenant_id", "shard_id", "timeline_id", "kind"]
497 : )
498 148 : .expect("failed to define a metric")
499 148 : });
500 :
501 148 : static TIMELINE_ARCHIVE_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
502 : register_uint_gauge_vec!(
503 : "pageserver_archive_size",
504 : "Timeline's logical size if it is considered eligible for archival (outside PITR window), else zero",
505 : &["tenant_id", "shard_id", "timeline_id"]
506 : )
507 148 : .expect("failed to define a metric")
508 148 : });
509 :
510 148 : static STANDBY_HORIZON: Lazy<IntGaugeVec> = Lazy::new(|| {
511 : register_int_gauge_vec!(
512 : "pageserver_standby_horizon",
513 : "Standby apply LSN for which GC is held off, by timeline.",
514 : &["tenant_id", "shard_id", "timeline_id"]
515 : )
516 148 : .expect("failed to define a metric")
517 148 : });
518 :
519 148 : static RESIDENT_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
520 : register_uint_gauge_vec!(
521 : "pageserver_resident_physical_size",
522 : "The size of the layer files present in the pageserver's filesystem, for attached locations.",
523 : &["tenant_id", "shard_id", "timeline_id"]
524 : )
525 148 : .expect("failed to define a metric")
526 148 : });
527 :
528 144 : pub(crate) static RESIDENT_PHYSICAL_SIZE_GLOBAL: Lazy<UIntGauge> = Lazy::new(|| {
529 : register_uint_gauge!(
530 : "pageserver_resident_physical_size_global",
531 : "Like `pageserver_resident_physical_size`, but without tenant/timeline dimensions."
532 : )
533 144 : .expect("failed to define a metric")
534 144 : });
535 :
536 148 : static REMOTE_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
537 : register_uint_gauge_vec!(
538 : "pageserver_remote_physical_size",
539 : "The size of the layer files present in the remote storage that are listed in the remote index_part.json.",
540 : // Corollary: If any files are missing from the index part, they won't be included here.
541 : &["tenant_id", "shard_id", "timeline_id"]
542 : )
543 148 : .expect("failed to define a metric")
544 148 : });
545 :
546 148 : static REMOTE_PHYSICAL_SIZE_GLOBAL: Lazy<UIntGauge> = Lazy::new(|| {
547 : register_uint_gauge!(
548 : "pageserver_remote_physical_size_global",
549 : "Like `pageserver_remote_physical_size`, but without tenant/timeline dimensions."
550 : )
551 148 : .expect("failed to define a metric")
552 148 : });
553 :
554 4 : pub(crate) static REMOTE_ONDEMAND_DOWNLOADED_LAYERS: Lazy<IntCounter> = Lazy::new(|| {
555 : register_int_counter!(
556 : "pageserver_remote_ondemand_downloaded_layers_total",
557 : "Total on-demand downloaded layers"
558 : )
559 4 : .unwrap()
560 4 : });
561 :
562 4 : pub(crate) static REMOTE_ONDEMAND_DOWNLOADED_BYTES: Lazy<IntCounter> = Lazy::new(|| {
563 : register_int_counter!(
564 : "pageserver_remote_ondemand_downloaded_bytes_total",
565 : "Total bytes of layers on-demand downloaded",
566 : )
567 4 : .unwrap()
568 4 : });
569 :
570 148 : static CURRENT_LOGICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
571 : register_uint_gauge_vec!(
572 : "pageserver_current_logical_size",
573 : "Current logical size grouped by timeline",
574 : &["tenant_id", "shard_id", "timeline_id"]
575 : )
576 148 : .expect("failed to define current logical size metric")
577 148 : });
578 :
579 148 : static AUX_FILE_SIZE: Lazy<IntGaugeVec> = Lazy::new(|| {
580 : register_int_gauge_vec!(
581 : "pageserver_aux_file_estimated_size",
582 : "The size of all aux files for a timeline in the aux file v2 store.",
583 : &["tenant_id", "shard_id", "timeline_id"]
584 : )
585 148 : .expect("failed to define a metric")
586 148 : });
587 :
588 148 : static VALID_LSN_LEASE_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
589 : register_uint_gauge_vec!(
590 : "pageserver_valid_lsn_lease_count",
591 : "The number of valid leases after refreshing gc info.",
592 : &["tenant_id", "shard_id", "timeline_id"],
593 : )
594 148 : .expect("failed to define a metric")
595 148 : });
596 :
597 0 : pub(crate) static CIRCUIT_BREAKERS_BROKEN: Lazy<IntCounter> = Lazy::new(|| {
598 : register_int_counter!(
599 : "pageserver_circuit_breaker_broken",
600 : "How many times a circuit breaker has broken"
601 : )
602 0 : .expect("failed to define a metric")
603 0 : });
604 :
605 0 : pub(crate) static CIRCUIT_BREAKERS_UNBROKEN: Lazy<IntCounter> = Lazy::new(|| {
606 : register_int_counter!(
607 : "pageserver_circuit_breaker_unbroken",
608 : "How many times a circuit breaker has been un-broken (recovered)"
609 : )
610 0 : .expect("failed to define a metric")
611 0 : });
612 :
613 140 : pub(crate) static COMPRESSION_IMAGE_INPUT_BYTES: Lazy<IntCounter> = Lazy::new(|| {
614 : register_int_counter!(
615 : "pageserver_compression_image_in_bytes_total",
616 : "Size of uncompressed data written into image layers"
617 : )
618 140 : .expect("failed to define a metric")
619 140 : });
620 :
621 140 : pub(crate) static COMPRESSION_IMAGE_OUTPUT_BYTES: Lazy<IntCounter> = Lazy::new(|| {
622 : register_int_counter!(
623 : "pageserver_compression_image_out_bytes_total",
624 : "Size of compressed image layer written"
625 : )
626 140 : .expect("failed to define a metric")
627 140 : });
628 :
629 : pub(crate) mod initial_logical_size {
630 : use metrics::{register_int_counter, register_int_counter_vec, IntCounter, IntCounterVec};
631 : use once_cell::sync::Lazy;
632 :
633 : pub(crate) struct StartCalculation(IntCounterVec);
634 148 : pub(crate) static START_CALCULATION: Lazy<StartCalculation> = Lazy::new(|| {
635 148 : StartCalculation(
636 148 : register_int_counter_vec!(
637 : "pageserver_initial_logical_size_start_calculation",
638 : "Incremented each time we start an initial logical size calculation attempt. \
639 : The `circumstances` label provides some additional details.",
640 : &["attempt", "circumstances"]
641 148 : )
642 148 : .unwrap(),
643 148 : )
644 148 : });
645 :
646 : struct DropCalculation {
647 : first: IntCounter,
648 : retry: IntCounter,
649 : }
650 :
651 148 : static DROP_CALCULATION: Lazy<DropCalculation> = Lazy::new(|| {
652 148 : let vec = register_int_counter_vec!(
653 : "pageserver_initial_logical_size_drop_calculation",
654 : "Incremented each time we abort a started size calculation attempt.",
655 : &["attempt"]
656 : )
657 148 : .unwrap();
658 148 : DropCalculation {
659 148 : first: vec.with_label_values(&["first"]),
660 148 : retry: vec.with_label_values(&["retry"]),
661 148 : }
662 148 : });
663 :
664 : pub(crate) struct Calculated {
665 : pub(crate) births: IntCounter,
666 : pub(crate) deaths: IntCounter,
667 : }
668 :
669 148 : pub(crate) static CALCULATED: Lazy<Calculated> = Lazy::new(|| Calculated {
670 : births: register_int_counter!(
671 : "pageserver_initial_logical_size_finish_calculation",
672 : "Incremented every time we finish calculation of initial logical size. \
673 : If everything is working well, this should happen at most once per Timeline object."
674 : )
675 148 : .unwrap(),
676 : deaths: register_int_counter!(
677 : "pageserver_initial_logical_size_drop_finished_calculation",
678 : "Incremented when we drop a finished initial logical size calculation result. \
679 : Mainly useful to turn pageserver_initial_logical_size_finish_calculation into a gauge."
680 : )
681 148 : .unwrap(),
682 148 : });
683 :
684 : pub(crate) struct OngoingCalculationGuard {
685 : inc_drop_calculation: Option<IntCounter>,
686 : }
687 :
688 160 : #[derive(strum_macros::IntoStaticStr)]
689 : pub(crate) enum StartCircumstances {
690 : EmptyInitial,
691 : SkippedConcurrencyLimiter,
692 : AfterBackgroundTasksRateLimit,
693 : }
694 :
695 : impl StartCalculation {
696 160 : pub(crate) fn first(&self, circumstances: StartCircumstances) -> OngoingCalculationGuard {
697 160 : let circumstances_label: &'static str = circumstances.into();
698 160 : self.0
699 160 : .with_label_values(&["first", circumstances_label])
700 160 : .inc();
701 160 : OngoingCalculationGuard {
702 160 : inc_drop_calculation: Some(DROP_CALCULATION.first.clone()),
703 160 : }
704 160 : }
705 0 : pub(crate) fn retry(&self, circumstances: StartCircumstances) -> OngoingCalculationGuard {
706 0 : let circumstances_label: &'static str = circumstances.into();
707 0 : self.0
708 0 : .with_label_values(&["retry", circumstances_label])
709 0 : .inc();
710 0 : OngoingCalculationGuard {
711 0 : inc_drop_calculation: Some(DROP_CALCULATION.retry.clone()),
712 0 : }
713 0 : }
714 : }
715 :
716 : impl Drop for OngoingCalculationGuard {
717 160 : fn drop(&mut self) {
718 160 : if let Some(counter) = self.inc_drop_calculation.take() {
719 0 : counter.inc();
720 160 : }
721 160 : }
722 : }
723 :
724 : impl OngoingCalculationGuard {
725 160 : pub(crate) fn calculation_result_saved(mut self) -> FinishedCalculationGuard {
726 160 : drop(self.inc_drop_calculation.take());
727 160 : CALCULATED.births.inc();
728 160 : FinishedCalculationGuard {
729 160 : inc_on_drop: CALCULATED.deaths.clone(),
730 160 : }
731 160 : }
732 : }
733 :
734 : pub(crate) struct FinishedCalculationGuard {
735 : inc_on_drop: IntCounter,
736 : }
737 :
738 : impl Drop for FinishedCalculationGuard {
739 6 : fn drop(&mut self) {
740 6 : self.inc_on_drop.inc();
741 6 : }
742 : }
743 :
744 : // context: https://github.com/neondatabase/neon/issues/5963
745 : pub(crate) static TIMELINES_WHERE_WALRECEIVER_GOT_APPROXIMATE_SIZE: Lazy<IntCounter> =
746 0 : Lazy::new(|| {
747 : register_int_counter!(
748 : "pageserver_initial_logical_size_timelines_where_walreceiver_got_approximate_size",
749 : "Counter for the following event: walreceiver calls \
750 : Timeline::get_current_logical_size() and it returns `Approximate` for the first time."
751 : )
752 0 : .unwrap()
753 0 : });
754 : }
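
// The guards in the module above turn two counters into an "alive results"
// gauge: `calculation_result_saved()` increments `..._finish_calculation`, and
// dropping the returned `FinishedCalculationGuard` increments
// `..._drop_finished_calculation`. PromQL sketch of the derived gauge:
// ```
// pageserver_initial_logical_size_finish_calculation
//   - pageserver_initial_logical_size_drop_finished_calculation
// ```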
755 :
756 0 : static DIRECTORY_ENTRIES_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
757 : register_uint_gauge_vec!(
758 : "pageserver_directory_entries_count",
759 : "Sum of the entries in pageserver-stored directory listings",
760 : &["tenant_id", "shard_id", "timeline_id"]
761 : )
762 0 : .expect("failed to define a metric")
763 0 : });
764 :
765 150 : pub(crate) static TENANT_STATE_METRIC: Lazy<UIntGaugeVec> = Lazy::new(|| {
766 : register_uint_gauge_vec!(
767 : "pageserver_tenant_states_count",
768 : "Count of tenants per state",
769 : &["state"]
770 : )
771 150 : .expect("Failed to register pageserver_tenant_states_count metric")
772 150 : });
773 :
774 : /// A set of broken tenants.
775 : ///
776 : /// These are expected to be so rare that a set is fine; "set" here means a new timeseries per
777 : /// broken tenant.
778 12 : pub(crate) static BROKEN_TENANTS_SET: Lazy<UIntGaugeVec> = Lazy::new(|| {
779 : register_uint_gauge_vec!(
780 : "pageserver_broken_tenants_count",
781 : "Set of broken tenants",
782 : &["tenant_id", "shard_id"]
783 : )
784 12 : .expect("Failed to register pageserver_broken_tenants_count metric")
785 12 : });
786 :
787 6 : pub(crate) static TENANT_SYNTHETIC_SIZE_METRIC: Lazy<UIntGaugeVec> = Lazy::new(|| {
788 : register_uint_gauge_vec!(
789 : "pageserver_tenant_synthetic_cached_size_bytes",
790 : "Synthetic size of each tenant in bytes",
791 : &["tenant_id"]
792 : )
793 6 : .expect("Failed to register pageserver_tenant_synthetic_cached_size_bytes metric")
794 6 : });
795 :
796 0 : pub(crate) static EVICTION_ITERATION_DURATION: Lazy<HistogramVec> = Lazy::new(|| {
797 : register_histogram_vec!(
798 : "pageserver_eviction_iteration_duration_seconds_global",
799 : "Time spent on a single eviction iteration",
800 : &["period_secs", "threshold_secs"],
801 : STORAGE_OP_BUCKETS.into(),
802 : )
803 0 : .expect("failed to define a metric")
804 0 : });
805 :
806 148 : static EVICTIONS: Lazy<IntCounterVec> = Lazy::new(|| {
807 : register_int_counter_vec!(
808 : "pageserver_evictions",
809 : "Number of layers evicted from the pageserver",
810 : &["tenant_id", "shard_id", "timeline_id"]
811 : )
812 148 : .expect("failed to define a metric")
813 148 : });
814 :
815 148 : static EVICTIONS_WITH_LOW_RESIDENCE_DURATION: Lazy<IntCounterVec> = Lazy::new(|| {
816 : register_int_counter_vec!(
817 : "pageserver_evictions_with_low_residence_duration",
818 : "If a layer is evicted that was resident for less than `low_threshold`, it is counted in this counter. \
819 : Residence duration is determined using the `residence_duration_data_source`.",
820 : &["tenant_id", "shard_id", "timeline_id", "residence_duration_data_source", "low_threshold_secs"]
821 : )
822 148 : .expect("failed to define a metric")
823 148 : });
824 :
825 0 : pub(crate) static UNEXPECTED_ONDEMAND_DOWNLOADS: Lazy<IntCounter> = Lazy::new(|| {
826 : register_int_counter!(
827 : "pageserver_unexpected_ondemand_downloads_count",
828 : "Number of unexpected on-demand downloads. \
829 : We log more context for each increment, so we forgo any labels in this metric.",
830 : )
831 0 : .expect("failed to define a metric")
832 0 : });
833 :
834 : /// How long did we take to start up? Broken down by labels to describe
835 : /// different phases of startup.
836 0 : pub static STARTUP_DURATION: Lazy<GaugeVec> = Lazy::new(|| {
837 : register_gauge_vec!(
838 : "pageserver_startup_duration_seconds",
839 : "Time taken by phases of pageserver startup, in seconds",
840 : &["phase"]
841 : )
842 0 : .expect("Failed to register pageserver_startup_duration_seconds metric")
843 0 : });
844 :
845 0 : pub static STARTUP_IS_LOADING: Lazy<UIntGauge> = Lazy::new(|| {
846 : register_uint_gauge!(
847 : "pageserver_startup_is_loading",
848 : "1 while in initial startup load of tenants, 0 at other times"
849 : )
850 0 : .expect("Failed to register pageserver_startup_is_loading")
851 0 : });
852 :
853 144 : pub(crate) static TIMELINE_EPHEMERAL_BYTES: Lazy<UIntGauge> = Lazy::new(|| {
854 : register_uint_gauge!(
855 : "pageserver_timeline_ephemeral_bytes",
856 : "Total number of bytes in ephemeral layers, summed for all timelines. Approximate, lazily updated."
857 : )
858 144 : .expect("Failed to register metric")
859 144 : });
860 :
861 : /// Metrics related to the lifecycle of a [`crate::tenant::Tenant`] object: things
862 : /// like how long it took to load.
863 : ///
864 : /// Note that these are process-global metrics, _not_ per-tenant metrics. Per-tenant
865 : /// metrics are rather expensive, and usually fine-grained stuff makes more sense
866 : /// at a timeline level than tenant level.
867 : pub(crate) struct TenantMetrics {
868 : /// How long did tenants take to go from construction to active state?
869 : pub(crate) activation: Histogram,
870 : pub(crate) preload: Histogram,
871 : pub(crate) attach: Histogram,
872 :
873 : /// How many tenants are included in the initial startup of the pageserver?
874 : pub(crate) startup_scheduled: IntCounter,
875 : pub(crate) startup_complete: IntCounter,
876 : }
877 :
878 0 : pub(crate) static TENANT: Lazy<TenantMetrics> = Lazy::new(|| {
879 0 : TenantMetrics {
880 0 : activation: register_histogram!(
881 : "pageserver_tenant_activation_seconds",
882 : "Time taken by tenants to activate, in seconds",
883 : CRITICAL_OP_BUCKETS.into()
884 0 : )
885 0 : .expect("Failed to register metric"),
886 0 : preload: register_histogram!(
887 : "pageserver_tenant_preload_seconds",
888 : "Time taken by tenants to load remote metadata on startup/attach, in seconds",
889 : CRITICAL_OP_BUCKETS.into()
890 0 : )
891 0 : .expect("Failed to register metric"),
892 0 : attach: register_histogram!(
893 : "pageserver_tenant_attach_seconds",
894 : "Time taken by tenants to initialize, after remote metadata is already loaded",
895 : CRITICAL_OP_BUCKETS.into()
896 0 : )
897 0 : .expect("Failed to register metric"),
898 0 : startup_scheduled: register_int_counter!(
899 : "pageserver_tenant_startup_scheduled",
900 : "Number of tenants included in pageserver startup (doesn't count tenants attached later)"
901 0 : ).expect("Failed to register metric"),
902 0 : startup_complete: register_int_counter!(
903 : "pageserver_tenant_startup_complete",
904 : "Number of tenants that have completed warm-up, or activated on-demand during initial startup: \
905 : should eventually reach `pageserver_tenant_startup_scheduled_total`. Does not include broken \
906 : tenants: such cases will lead to this metric never reaching the scheduled count."
907 0 : ).expect("Failed to register metric"),
908 0 : }
909 0 : });
910 :
911 : /// Each `Timeline`'s [`EVICTIONS_WITH_LOW_RESIDENCE_DURATION`] metric.
912 : #[derive(Debug)]
913 : pub(crate) struct EvictionsWithLowResidenceDuration {
914 : data_source: &'static str,
915 : threshold: Duration,
916 : counter: Option<IntCounter>,
917 : }
918 :
919 : pub(crate) struct EvictionsWithLowResidenceDurationBuilder {
920 : data_source: &'static str,
921 : threshold: Duration,
922 : }
923 :
924 : impl EvictionsWithLowResidenceDurationBuilder {
925 390 : pub fn new(data_source: &'static str, threshold: Duration) -> Self {
926 390 : Self {
927 390 : data_source,
928 390 : threshold,
929 390 : }
930 390 : }
931 :
932 390 : fn build(
933 390 : &self,
934 390 : tenant_id: &str,
935 390 : shard_id: &str,
936 390 : timeline_id: &str,
937 390 : ) -> EvictionsWithLowResidenceDuration {
938 390 : let counter = EVICTIONS_WITH_LOW_RESIDENCE_DURATION
939 390 : .get_metric_with_label_values(&[
940 390 : tenant_id,
941 390 : shard_id,
942 390 : timeline_id,
943 390 : self.data_source,
944 390 : &EvictionsWithLowResidenceDuration::threshold_label_value(self.threshold),
945 390 : ])
946 390 : .unwrap();
947 390 : EvictionsWithLowResidenceDuration {
948 390 : data_source: self.data_source,
949 390 : threshold: self.threshold,
950 390 : counter: Some(counter),
951 390 : }
952 390 : }
953 : }
954 :
955 : impl EvictionsWithLowResidenceDuration {
956 398 : fn threshold_label_value(threshold: Duration) -> String {
957 398 : format!("{}", threshold.as_secs())
958 398 : }
959 :
960 4 : pub fn observe(&self, observed_value: Duration) {
961 4 : if observed_value < self.threshold {
962 4 : self.counter
963 4 : .as_ref()
964 4 : .expect("nobody calls this function after `remove_from_vec`")
965 4 : .inc();
966 4 : }
967 4 : }
968 :
969 8 : pub fn change_threshold(
970 8 : &mut self,
971 8 : tenant_id: &str,
972 8 : shard_id: &str,
973 8 : timeline_id: &str,
974 8 : new_threshold: Duration,
975 8 : ) {
976 8 : if new_threshold == self.threshold {
977 8 : return;
978 0 : }
979 0 : let mut with_new = EvictionsWithLowResidenceDurationBuilder::new(
980 0 : self.data_source,
981 0 : new_threshold,
982 0 : )
983 0 : .build(tenant_id, shard_id, timeline_id);
984 0 : std::mem::swap(self, &mut with_new);
985 0 : with_new.remove(tenant_id, shard_id, timeline_id);
986 8 : }
987 :
988 : // This could be a `Drop` impl, but we need the `tenant_id` and `timeline_id`.
989 8 : fn remove(&mut self, tenant_id: &str, shard_id: &str, timeline_id: &str) {
990 8 : let Some(_counter) = self.counter.take() else {
991 0 : return;
992 : };
993 :
994 8 : let threshold = Self::threshold_label_value(self.threshold);
995 8 :
996 8 : let removed = EVICTIONS_WITH_LOW_RESIDENCE_DURATION.remove_label_values(&[
997 8 : tenant_id,
998 8 : shard_id,
999 8 : timeline_id,
1000 8 : self.data_source,
1001 8 : &threshold,
1002 8 : ]);
1003 8 :
1004 8 : match removed {
1005 0 : Err(e) => {
1006 0 : // this has been hit in staging as
1007 0 : // <https://neondatabase.sentry.io/issues/4142396994/>, but we don't know how.
1008 0 : // because we can be in the drop path already, don't risk:
1009 0 : // - "double-panic => illegal instruction" or
1010 0 : // - future "drop panic => abort"
1011 0 : //
1012 0 : // so just nag: (the error has the labels)
1013 0 : tracing::warn!("failed to remove EvictionsWithLowResidenceDuration, it was already removed? {e:#?}");
1014 : }
1015 : Ok(()) => {
1016 : // to help identify cases where we double-remove the same values, let's log all
1017 : // deletions?
1018 8 : tracing::info!("removed EvictionsWithLowResidenceDuration with {tenant_id}, {timeline_id}, {}, {threshold}", self.data_source);
1019 : }
1020 : }
1021 8 : }
1022 : }
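
// Lifecycle sketch for the type above (hypothetical call sites; `m` is the
// metric owned by a timeline): `observe` counts an eviction whose residence
// time was below the threshold, and a threshold change swaps in a freshly
// labelled counter while removing the old timeseries:
//
//     m.observe(resident_duration);
//     m.change_threshold(&tenant_id, &shard_id, &timeline_id, new_threshold);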
1023 :
1024 : // Metrics collected on disk IO operations
1025 : //
1026 : // Roughly logarithmic scale.
1027 : const STORAGE_IO_TIME_BUCKETS: &[f64] = &[
1028 : 0.000030, // 30 usec
1029 : 0.001000, // 1000 usec
1030 : 0.030, // 30 ms
1031 : 1.000, // 1000 ms
1032 : 30.000, // 30000 ms
1033 : ];
1034 :
1035 : /// VirtualFile fs operation variants.
1036 : ///
1037 : /// Operations:
1038 : /// - open ([`std::fs::OpenOptions::open`])
1039 : /// - close (dropping [`crate::virtual_file::VirtualFile`])
1040 : /// - close-by-replace (close by replacement algorithm)
1041 : /// - read (`read_at`)
1042 : /// - write (`write_at`)
1043 : /// - seek (modify internal position or file length query)
1044 : /// - fsync ([`std::fs::File::sync_all`])
1045 : /// - metadata ([`std::fs::File::metadata`])
1046 : #[derive(
1047 1548 : Debug, Clone, Copy, strum_macros::EnumCount, strum_macros::EnumIter, strum_macros::FromRepr,
1048 : )]
1049 : pub(crate) enum StorageIoOperation {
1050 : Open,
1051 : OpenAfterReplace,
1052 : Close,
1053 : CloseByReplace,
1054 : Read,
1055 : Write,
1056 : Seek,
1057 : Fsync,
1058 : Metadata,
1059 : }
1060 :
1061 : impl StorageIoOperation {
1062 1548 : pub fn as_str(&self) -> &'static str {
1063 1548 : match self {
1064 172 : StorageIoOperation::Open => "open",
1065 172 : StorageIoOperation::OpenAfterReplace => "open-after-replace",
1066 172 : StorageIoOperation::Close => "close",
1067 172 : StorageIoOperation::CloseByReplace => "close-by-replace",
1068 172 : StorageIoOperation::Read => "read",
1069 172 : StorageIoOperation::Write => "write",
1070 172 : StorageIoOperation::Seek => "seek",
1071 172 : StorageIoOperation::Fsync => "fsync",
1072 172 : StorageIoOperation::Metadata => "metadata",
1073 : }
1074 1548 : }
1075 : }
1076 :
1077 : /// Tracks time taken by fs operations near VirtualFile.
1078 : #[derive(Debug)]
1079 : pub(crate) struct StorageIoTime {
1080 : metrics: [Histogram; StorageIoOperation::COUNT],
1081 : }
1082 :
1083 : impl StorageIoTime {
1084 172 : fn new() -> Self {
1085 172 : let storage_io_histogram_vec = register_histogram_vec!(
1086 : "pageserver_io_operations_seconds",
1087 : "Time spent in IO operations",
1088 : &["operation"],
1089 : STORAGE_IO_TIME_BUCKETS.into()
1090 : )
1091 172 : .expect("failed to define a metric");
1092 1548 : let metrics = std::array::from_fn(|i| {
1093 1548 : let op = StorageIoOperation::from_repr(i).unwrap();
1094 1548 : storage_io_histogram_vec
1095 1548 : .get_metric_with_label_values(&[op.as_str()])
1096 1548 : .unwrap()
1097 1548 : });
1098 172 : Self { metrics }
1099 172 : }
1100 :
1101 1926627 : pub(crate) fn get(&self, op: StorageIoOperation) -> &Histogram {
1102 1926627 : &self.metrics[op as usize]
1103 1926627 : }
1104 : }
1105 :
1106 : pub(crate) static STORAGE_IO_TIME_METRIC: Lazy<StorageIoTime> = Lazy::new(StorageIoTime::new);
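
// Usage sketch for the accessor above (the actual VirtualFile call sites live
// elsewhere; `file`, `buf` and `offset` are illustrative names):
//
//     let start = std::time::Instant::now();
//     let res = file.read_at(buf, offset);
//     STORAGE_IO_TIME_METRIC
//         .get(StorageIoOperation::Read)
//         .observe(start.elapsed().as_secs_f64());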
1107 :
1108 : const STORAGE_IO_SIZE_OPERATIONS: &[&str] = &["read", "write"];
1109 :
1110 : // Needed for the https://neonprod.grafana.net/d/5uK9tHL4k/picking-tenant-for-relocation?orgId=1
1111 170 : pub(crate) static STORAGE_IO_SIZE: Lazy<IntGaugeVec> = Lazy::new(|| {
1112 : register_int_gauge_vec!(
1113 : "pageserver_io_operations_bytes_total",
1114 : "Total amount of bytes read/written in IO operations",
1115 : &["operation", "tenant_id", "shard_id", "timeline_id"]
1116 : )
1117 170 : .expect("failed to define a metric")
1118 170 : });
1119 :
1120 : #[cfg(not(test))]
1121 : pub(crate) mod virtual_file_descriptor_cache {
1122 : use super::*;
1123 :
1124 0 : pub(crate) static SIZE_MAX: Lazy<UIntGauge> = Lazy::new(|| {
1125 : register_uint_gauge!(
1126 : "pageserver_virtual_file_descriptor_cache_size_max",
1127 : "Maximum number of open file descriptors in the cache."
1128 : )
1129 0 : .unwrap()
1130 0 : });
1131 :
1132 : // SIZE_CURRENT: derive it like so:
1133 : // ```
1134 : // sum(pageserver_io_operations_seconds_count{operation=~"^(open|open-after-replace)$"})
1135 : // -ignoring(operation)
1136 : // sum(pageserver_io_operations_seconds_count{operation=~"^(close|close-by-replace)$"})
1137 : // ```
1138 : }
1139 :
1140 : #[cfg(not(test))]
1141 : pub(crate) mod virtual_file_io_engine {
1142 : use super::*;
1143 :
1144 0 : pub(crate) static KIND: Lazy<UIntGaugeVec> = Lazy::new(|| {
1145 : register_uint_gauge_vec!(
1146 : "pageserver_virtual_file_io_engine_kind",
1147 : "The configured io engine for VirtualFile",
1148 : &["kind"],
1149 : )
1150 0 : .unwrap()
1151 0 : });
1152 : }
1153 :
1154 : struct GlobalAndPerTimelineHistogramTimer<'a, 'c> {
1155 : global_metric: &'a Histogram,
1156 :
1157 : // Optional because not all op types are tracked per-timeline
1158 : timeline_metric: Option<&'a Histogram>,
1159 :
1160 : ctx: &'c RequestContext,
1161 : start: std::time::Instant,
1162 : op: SmgrQueryType,
1163 : }
1164 :
1165 : impl<'a, 'c> Drop for GlobalAndPerTimelineHistogramTimer<'a, 'c> {
1166 10 : fn drop(&mut self) {
1167 10 : let elapsed = self.start.elapsed();
1168 10 : let ex_throttled = self
1169 10 : .ctx
1170 10 : .micros_spent_throttled
1171 10 : .close_and_checked_sub_from(elapsed);
1172 10 : let ex_throttled = match ex_throttled {
1173 10 : Ok(res) => res,
1174 0 : Err(error) => {
1175 0 : use utils::rate_limit::RateLimit;
1176 0 : static LOGGED: Lazy<Mutex<enum_map::EnumMap<SmgrQueryType, RateLimit>>> =
1177 0 : Lazy::new(|| {
1178 0 : Mutex::new(enum_map::EnumMap::from_array(std::array::from_fn(|_| {
1179 0 : RateLimit::new(Duration::from_secs(10))
1180 0 : })))
1181 0 : });
1182 0 : let mut guard = LOGGED.lock().unwrap();
1183 0 : let rate_limit = &mut guard[self.op];
1184 0 : rate_limit.call(|| {
1185 0 : warn!(op=?self.op, error, "error deducting time spent throttled; this message is logged at a global rate limit");
1186 0 : });
1187 0 : elapsed
1188 : }
1189 : };
1190 10 : self.global_metric.observe(ex_throttled.as_secs_f64());
1191 10 : if let Some(timeline_metric) = self.timeline_metric {
1192 2 : timeline_metric.observe(ex_throttled.as_secs_f64());
1193 8 : }
1194 10 : }
1195 : }
1196 :
1197 : #[derive(
1198 : Debug,
1199 : Clone,
1200 : Copy,
1201 2418 : IntoStaticStr,
1202 : strum_macros::EnumCount,
1203 24 : strum_macros::EnumIter,
1204 2000 : strum_macros::FromRepr,
1205 : enum_map::Enum,
1206 : )]
1207 : #[strum(serialize_all = "snake_case")]
1208 : pub enum SmgrQueryType {
1209 : GetRelExists,
1210 : GetRelSize,
1211 : GetPageAtLsn,
1212 : GetDbSize,
1213 : GetSlruSegment,
1214 : }
1215 :
1216 : #[derive(Debug)]
1217 : pub(crate) struct SmgrQueryTimePerTimeline {
1218 : global_metrics: [Histogram; SmgrQueryType::COUNT],
1219 : per_timeline_getpage: Histogram,
1220 : }
1221 :
1222 150 : static SMGR_QUERY_TIME_PER_TENANT_TIMELINE: Lazy<HistogramVec> = Lazy::new(|| {
1223 : register_histogram_vec!(
1224 : "pageserver_smgr_query_seconds",
1225 : "Time spent on smgr query handling, aggregated by query type and tenant/timeline.",
1226 : &["smgr_query_type", "tenant_id", "shard_id", "timeline_id"],
1227 : CRITICAL_OP_BUCKETS.into(),
1228 : )
1229 150 : .expect("failed to define a metric")
1230 150 : });
1231 :
1232 150 : static SMGR_QUERY_TIME_GLOBAL_BUCKETS: Lazy<Vec<f64>> = Lazy::new(|| {
1233 150 : [
1234 150 : 1,
1235 150 : 10,
1236 150 : 20,
1237 150 : 40,
1238 150 : 60,
1239 150 : 80,
1240 150 : 100,
1241 150 : 200,
1242 150 : 300,
1243 150 : 400,
1244 150 : 500,
1245 150 : 600,
1246 150 : 700,
1247 150 : 800,
1248 150 : 900,
1249 150 : 1_000, // 1ms
1250 150 : 2_000,
1251 150 : 4_000,
1252 150 : 6_000,
1253 150 : 8_000,
1254 150 : 10_000, // 10ms
1255 150 : 20_000,
1256 150 : 40_000,
1257 150 : 60_000,
1258 150 : 80_000,
1259 150 : 100_000,
1260 150 : 200_000,
1261 150 : 400_000,
1262 150 : 600_000,
1263 150 : 800_000,
1264 150 : 1_000_000, // 1s
1265 150 : 2_000_000,
1266 150 : 4_000_000,
1267 150 : 6_000_000,
1268 150 : 8_000_000,
1269 150 : 10_000_000, // 10s
1270 150 : 20_000_000,
1271 150 : 50_000_000,
1272 150 : 100_000_000,
1273 150 : 200_000_000,
1274 150 : 1_000_000_000, // 1000s
1275 150 : ]
1276 150 : .into_iter()
1277 150 : .map(Duration::from_micros)
1278 6150 : .map(|d| d.as_secs_f64())
1279 150 : .collect()
1280 150 : });
1281 :
1282 150 : static SMGR_QUERY_TIME_GLOBAL: Lazy<HistogramVec> = Lazy::new(|| {
1283 : register_histogram_vec!(
1284 : "pageserver_smgr_query_seconds_global",
1285 : "Time spent on smgr query handling, aggregated by query type.",
1286 : &["smgr_query_type"],
1287 : SMGR_QUERY_TIME_GLOBAL_BUCKETS.clone(),
1288 : )
1289 150 : .expect("failed to define a metric")
1290 150 : });
1291 :
1292 : impl SmgrQueryTimePerTimeline {
1293 400 : pub(crate) fn new(tenant_shard_id: &TenantShardId, timeline_id: &TimelineId) -> Self {
1294 400 : let tenant_id = tenant_shard_id.tenant_id.to_string();
1295 400 : let shard_slug = format!("{}", tenant_shard_id.shard_slug());
1296 400 : let timeline_id = timeline_id.to_string();
1297 2000 : let global_metrics = std::array::from_fn(|i| {
1298 2000 : let op = SmgrQueryType::from_repr(i).unwrap();
1299 2000 : SMGR_QUERY_TIME_GLOBAL
1300 2000 : .get_metric_with_label_values(&[op.into()])
1301 2000 : .unwrap()
1302 2000 : });
1303 400 :
1304 400 : let per_timeline_getpage = SMGR_QUERY_TIME_PER_TENANT_TIMELINE
1305 400 : .get_metric_with_label_values(&[
1306 400 : SmgrQueryType::GetPageAtLsn.into(),
1307 400 : &tenant_id,
1308 400 : &shard_slug,
1309 400 : &timeline_id,
1310 400 : ])
1311 400 : .unwrap();
1312 400 : Self {
1313 400 : global_metrics,
1314 400 : per_timeline_getpage,
1315 400 : }
1316 400 : }
1317 10 : pub(crate) fn start_timer<'c: 'a, 'a>(
1318 10 : &'a self,
1319 10 : op: SmgrQueryType,
1320 10 : ctx: &'c RequestContext,
1321 10 : ) -> Option<impl Drop + '_> {
1322 10 : let global_metric = &self.global_metrics[op as usize];
1323 10 : let start = Instant::now();
1324 10 : match ctx.micros_spent_throttled.open() {
1325 10 : Ok(()) => (),
1326 0 : Err(error) => {
1327 0 : use utils::rate_limit::RateLimit;
1328 0 : static LOGGED: Lazy<Mutex<enum_map::EnumMap<SmgrQueryType, RateLimit>>> =
1329 0 : Lazy::new(|| {
1330 0 : Mutex::new(enum_map::EnumMap::from_array(std::array::from_fn(|_| {
1331 0 : RateLimit::new(Duration::from_secs(10))
1332 0 : })))
1333 0 : });
1334 0 : let mut guard = LOGGED.lock().unwrap();
1335 0 : let rate_limit = &mut guard[op];
1336 0 : rate_limit.call(|| {
1337 0 : warn!(?op, error, "error opening micros_spent_throttled; this message is logged at a global rate limit");
1338 0 : });
1339 0 : }
1340 : }
1341 :
1342 10 : let timeline_metric = if matches!(op, SmgrQueryType::GetPageAtLsn) {
1343 2 : Some(&self.per_timeline_getpage)
1344 : } else {
1345 8 : None
1346 : };
1347 :
1348 10 : Some(GlobalAndPerTimelineHistogramTimer {
1349 10 : global_metric,
1350 10 : timeline_metric,
1351 10 : ctx,
1352 10 : start,
1353 10 : op,
1354 10 : })
1355 10 : }
1356 : }
1357 :
1358 : #[cfg(test)]
1359 : mod smgr_query_time_tests {
1360 : use pageserver_api::shard::TenantShardId;
1361 : use strum::IntoEnumIterator;
1362 : use utils::id::{TenantId, TimelineId};
1363 :
1364 : use crate::{
1365 : context::{DownloadBehavior, RequestContext},
1366 : task_mgr::TaskKind,
1367 : };
1368 :
1369 : // Regression test: we used hard-coded string constants before using an enum.
1370 : #[test]
1371 2 : fn op_label_name() {
1372 2 : use super::SmgrQueryType::*;
1373 2 : let expect: [(super::SmgrQueryType, &'static str); 5] = [
1374 2 : (GetRelExists, "get_rel_exists"),
1375 2 : (GetRelSize, "get_rel_size"),
1376 2 : (GetPageAtLsn, "get_page_at_lsn"),
1377 2 : (GetDbSize, "get_db_size"),
1378 2 : (GetSlruSegment, "get_slru_segment"),
1379 2 : ];
1380 12 : for (op, expect) in expect {
1381 10 : let actual: &'static str = op.into();
1382 10 : assert_eq!(actual, expect);
1383 : }
1384 2 : }
1385 :
1386 : #[test]
1387 2 : fn basic() {
1388 2 : let ops: Vec<_> = super::SmgrQueryType::iter().collect();
1389 :
1390 12 : for op in &ops {
1391 10 : let tenant_id = TenantId::generate();
1392 10 : let timeline_id = TimelineId::generate();
1393 10 : let metrics = super::SmgrQueryTimePerTimeline::new(
1394 10 : &TenantShardId::unsharded(tenant_id),
1395 10 : &timeline_id,
1396 10 : );
1397 10 :
1398 20 : let get_counts = || {
1399 20 : let global: u64 = ops
1400 20 : .iter()
1401 100 : .map(|op| metrics.global_metrics[*op as usize].get_sample_count())
1402 20 : .sum();
1403 20 : (global, metrics.per_timeline_getpage.get_sample_count())
1404 20 : };
1405 :
1406 10 : let (pre_global, pre_per_tenant_timeline) = get_counts();
1407 10 : assert_eq!(pre_per_tenant_timeline, 0);
1408 :
1409 10 : let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Download);
1410 10 : let timer = metrics.start_timer(*op, &ctx);
1411 10 : drop(timer);
1412 10 :
1413 10 : let (post_global, post_per_tenant_timeline) = get_counts();
1414 10 : if matches!(op, super::SmgrQueryType::GetPageAtLsn) {
1415 : // getpage ops are tracked per-timeline, others aren't
1416 2 : assert_eq!(post_per_tenant_timeline, 1);
1417 : } else {
1418 8 : assert_eq!(post_per_tenant_timeline, 0);
1419 : }
1420 10 : assert!(post_global > pre_global);
1421 : }
1422 2 : }
1423 : }
1424 :
1425 : // keep in sync with control plane Go code so that we can validate
1426 : // compute's basebackup_ms metric with our perspective in the context of SLI/SLO.
1427 0 : static COMPUTE_STARTUP_BUCKETS: Lazy<[f64; 28]> = Lazy::new(|| {
1428 0 : // Go code uses milliseconds. Variable is called `computeStartupBuckets`
1429 0 : [
1430 0 : 5, 10, 20, 30, 50, 70, 100, 120, 150, 200, 250, 300, 350, 400, 450, 500, 600, 800, 1000,
1431 0 : 1500, 2000, 2500, 3000, 5000, 10000, 20000, 40000, 60000,
1432 0 : ]
1433 0 : .map(|ms| (ms as f64) / 1000.0)
1434 0 : });
1435 :
1436 : pub(crate) struct BasebackupQueryTime {
1437 : ok: Histogram,
1438 : error: Histogram,
1439 : }
1440 :
1441 0 : pub(crate) static BASEBACKUP_QUERY_TIME: Lazy<BasebackupQueryTime> = Lazy::new(|| {
1442 0 : let vec = register_histogram_vec!(
1443 : "pageserver_basebackup_query_seconds",
1444 : "Histogram of basebackup query durations, by result type",
1445 : &["result"],
1446 : COMPUTE_STARTUP_BUCKETS.to_vec(),
1447 : )
1448 0 : .expect("failed to define a metric");
1449 0 : BasebackupQueryTime {
1450 0 : ok: vec.get_metric_with_label_values(&["ok"]).unwrap(),
1451 0 : error: vec.get_metric_with_label_values(&["error"]).unwrap(),
1452 0 : }
1453 0 : });
1454 :
1455 : pub(crate) struct BasebackupQueryTimeOngoingRecording<'a, 'c> {
1456 : parent: &'a BasebackupQueryTime,
1457 : ctx: &'c RequestContext,
1458 : start: std::time::Instant,
1459 : }
1460 :
1461 : impl BasebackupQueryTime {
1462 0 : pub(crate) fn start_recording<'c: 'a, 'a>(
1463 0 : &'a self,
1464 0 : ctx: &'c RequestContext,
1465 0 : ) -> BasebackupQueryTimeOngoingRecording<'_, '_> {
1466 0 : let start = Instant::now();
1467 0 : match ctx.micros_spent_throttled.open() {
1468 0 : Ok(()) => (),
1469 0 : Err(error) => {
1470 0 : use utils::rate_limit::RateLimit;
1471 0 : static LOGGED: Lazy<Mutex<RateLimit>> =
1472 0 : Lazy::new(|| Mutex::new(RateLimit::new(Duration::from_secs(10))));
1473 0 : let mut rate_limit = LOGGED.lock().unwrap();
1474 0 : rate_limit.call(|| {
1475 0 : warn!(error, "error opening micros_spent_throttled; this message is logged at a global rate limit");
1476 0 : });
1477 0 : }
1478 : }
1479 0 : BasebackupQueryTimeOngoingRecording {
1480 0 : parent: self,
1481 0 : ctx,
1482 0 : start,
1483 0 : }
1484 0 : }
1485 : }
1486 :
1487 : impl<'a, 'c> BasebackupQueryTimeOngoingRecording<'a, 'c> {
1488 0 : pub(crate) fn observe<T, E>(self, res: &Result<T, E>) {
1489 0 : let elapsed = self.start.elapsed();
1490 0 : let ex_throttled = self
1491 0 : .ctx
1492 0 : .micros_spent_throttled
1493 0 : .close_and_checked_sub_from(elapsed);
1494 0 : let ex_throttled = match ex_throttled {
1495 0 : Ok(ex_throttled) => ex_throttled,
1496 0 : Err(error) => {
1497 0 : use utils::rate_limit::RateLimit;
1498 0 : static LOGGED: Lazy<Mutex<RateLimit>> =
1499 0 : Lazy::new(|| Mutex::new(RateLimit::new(Duration::from_secs(10))));
1500 0 : let mut rate_limit = LOGGED.lock().unwrap();
1501 0 : rate_limit.call(|| {
1502 0 : warn!(error, "error deducting time spent throttled; this message is logged at a global rate limit");
1503 0 : });
1504 0 : elapsed
1505 : }
1506 : };
1507 0 : let metric = if res.is_ok() {
1508 0 : &self.parent.ok
1509 : } else {
1510 0 : &self.parent.error
1511 : };
1512 0 : metric.observe(ex_throttled.as_secs_f64());
1513 0 : }
1514 : }
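
// Recording sketch for the pair of types above (hypothetical basebackup
// handler; `send_basebackup` is an assumed name, not the real function):
//
//     let recording = BASEBACKUP_QUERY_TIME.start_recording(ctx);
//     let res = send_basebackup(/* ... */).await;
//     recording.observe(&res);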
1515 :
1516 0 : pub(crate) static LIVE_CONNECTIONS: Lazy<IntCounterPairVec> = Lazy::new(|| {
1517 : register_int_counter_pair_vec!(
1518 : "pageserver_live_connections_started",
1519 : "Number of network connections that we started handling",
1520 : "pageserver_live_connections_finished",
1521 : "Number of network connections that we finished handling",
1522 : &["pageserver_connection_kind"]
1523 : )
1524 0 : .expect("failed to define a metric")
1525 0 : });
1526 :
1527 0 : #[derive(Clone, Copy, enum_map::Enum, IntoStaticStr)]
1528 : pub(crate) enum ComputeCommandKind {
1529 : PageStreamV2,
1530 : PageStream,
1531 : Basebackup,
1532 : Fullbackup,
1533 : LeaseLsn,
1534 : }
1535 :
1536 : pub(crate) struct ComputeCommandCounters {
1537 : map: EnumMap<ComputeCommandKind, IntCounter>,
1538 : }
1539 :
1540 0 : pub(crate) static COMPUTE_COMMANDS_COUNTERS: Lazy<ComputeCommandCounters> = Lazy::new(|| {
1541 0 : let inner = register_int_counter_vec!(
1542 : "pageserver_compute_commands",
1543 : "Number of compute -> pageserver commands processed",
1544 : &["command"]
1545 : )
1546 0 : .expect("failed to define a metric");
1547 0 :
1548 0 : ComputeCommandCounters {
1549 0 : map: EnumMap::from_array(std::array::from_fn(|i| {
1550 0 : let command = <ComputeCommandKind as enum_map::Enum>::from_usize(i);
1551 0 : let command_str: &'static str = command.into();
1552 0 : inner.with_label_values(&[command_str])
1553 0 : })),
1554 0 : }
1555 0 : });
1556 :
1557 : impl ComputeCommandCounters {
1558 0 : pub(crate) fn for_command(&self, command: ComputeCommandKind) -> &IntCounter {
1559 0 : &self.map[command]
1560 0 : }
1561 : }
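
// Usage sketch for the counter lookup above (hypothetical call site in the
// page service command dispatch):
//
//     COMPUTE_COMMANDS_COUNTERS
//         .for_command(ComputeCommandKind::PageStreamV2)
//         .inc();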
1562 :
1563 : // remote storage metrics
1564 :
1565 146 : static REMOTE_TIMELINE_CLIENT_CALLS: Lazy<IntCounterPairVec> = Lazy::new(|| {
1566 : register_int_counter_pair_vec!(
1567 : "pageserver_remote_timeline_client_calls_started",
1568 : "Number of started calls to remote timeline client.",
1569 : "pageserver_remote_timeline_client_calls_finished",
1570 : "Number of finished calls to remote timeline client.",
1571 : &[
1572 : "tenant_id",
1573 : "shard_id",
1574 : "timeline_id",
1575 : "file_kind",
1576 : "op_kind"
1577 : ],
1578 : )
1579 146 : .unwrap()
1580 146 : });
1581 :
1582 : static REMOTE_TIMELINE_CLIENT_BYTES_STARTED_COUNTER: Lazy<IntCounterVec> =
1583 142 : Lazy::new(|| {
1584 : register_int_counter_vec!(
1585 : "pageserver_remote_timeline_client_bytes_started",
1586 : "Incremented by the number of bytes associated with a remote timeline client operation. \
1587 : The increment happens when the operation is scheduled.",
1588 : &["tenant_id", "shard_id", "timeline_id", "file_kind", "op_kind"],
1589 : )
1590 142 : .expect("failed to define a metric")
1591 142 : });
1592 :
1593 142 : static REMOTE_TIMELINE_CLIENT_BYTES_FINISHED_COUNTER: Lazy<IntCounterVec> = Lazy::new(|| {
1594 : register_int_counter_vec!(
1595 : "pageserver_remote_timeline_client_bytes_finished",
1596 : "Incremented by the number of bytes associated with a remote timeline client operation. \
1597 : The increment happens when the operation finishes (regardless of success/failure/shutdown).",
1598 : &["tenant_id", "shard_id", "timeline_id", "file_kind", "op_kind"],
1599 : )
1600 142 : .expect("failed to define a metric")
1601 142 : });
1602 :
1603 : pub(crate) struct TenantManagerMetrics {
1604 : tenant_slots_attached: UIntGauge,
1605 : tenant_slots_secondary: UIntGauge,
1606 : tenant_slots_inprogress: UIntGauge,
1607 : pub(crate) tenant_slot_writes: IntCounter,
1608 : pub(crate) unexpected_errors: IntCounter,
1609 : }
1610 :
1611 : impl TenantManagerMetrics {
1612 : /// Helpers for tracking slots. Note that these do not track the lifetime of TenantSlot objects
1613 : /// exactly: they track the lifetime of the slots _in the tenant map_.
1614 2 : pub(crate) fn slot_inserted(&self, slot: &TenantSlot) {
1615 2 : match slot {
1616 0 : TenantSlot::Attached(_) => {
1617 0 : self.tenant_slots_attached.inc();
1618 0 : }
1619 0 : TenantSlot::Secondary(_) => {
1620 0 : self.tenant_slots_secondary.inc();
1621 0 : }
1622 2 : TenantSlot::InProgress(_) => {
1623 2 : self.tenant_slots_inprogress.inc();
1624 2 : }
1625 : }
1626 2 : }
1627 :
1628 2 : pub(crate) fn slot_removed(&self, slot: &TenantSlot) {
1629 2 : match slot {
1630 2 : TenantSlot::Attached(_) => {
1631 2 : self.tenant_slots_attached.dec();
1632 2 : }
1633 0 : TenantSlot::Secondary(_) => {
1634 0 : self.tenant_slots_secondary.dec();
1635 0 : }
1636 0 : TenantSlot::InProgress(_) => {
1637 0 : self.tenant_slots_inprogress.dec();
1638 0 : }
1639 : }
1640 2 : }
1641 :
1642 : #[cfg(all(debug_assertions, not(test)))]
1643 0 : pub(crate) fn slots_total(&self) -> u64 {
1644 0 : self.tenant_slots_attached.get()
1645 0 : + self.tenant_slots_secondary.get()
1646 0 : + self.tenant_slots_inprogress.get()
1647 0 : }
1648 : }
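// Illustrative usage sketch: a tenant-map mutation mirrors every slot it removes
// and inserts with exactly one gauge update each, and counts the write. The
// surrounding map-manipulation code is assumed, not shown.
#[allow(dead_code)]
fn example_swap_tenant_slot(metrics: &TenantManagerMetrics, old: &TenantSlot, new: &TenantSlot) {
    metrics.slot_removed(old);
    metrics.slot_inserted(new);
    metrics.tenant_slot_writes.inc();
}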
1649 :
1650 2 : pub(crate) static TENANT_MANAGER: Lazy<TenantManagerMetrics> = Lazy::new(|| {
1651 2 : let tenant_slots = register_uint_gauge_vec!(
1652 : "pageserver_tenant_manager_slots",
1653 : "How many slots currently exist, including all attached, secondary and in-progress operations",
1654 : &["mode"]
1655 : )
1656 2 : .expect("failed to define a metric");
1657 2 : TenantManagerMetrics {
1658 2 : tenant_slots_attached: tenant_slots
1659 2 : .get_metric_with_label_values(&["attached"])
1660 2 : .unwrap(),
1661 2 : tenant_slots_secondary: tenant_slots
1662 2 : .get_metric_with_label_values(&["secondary"])
1663 2 : .unwrap(),
1664 2 : tenant_slots_inprogress: tenant_slots
1665 2 : .get_metric_with_label_values(&["inprogress"])
1666 2 : .unwrap(),
1667 2 : tenant_slot_writes: register_int_counter!(
1668 : "pageserver_tenant_manager_slot_writes",
1669 : "Writes to a tenant slot, including all of create/attach/detach/delete"
1670 2 : )
1671 2 : .expect("failed to define a metric"),
1672 2 : unexpected_errors: register_int_counter!(
1673 : "pageserver_tenant_manager_unexpected_errors_total",
1674 : "Number of unexpected conditions encountered: nonzero value indicates a non-fatal bug."
1675 2 : )
1676 2 : .expect("failed to define a metric"),
1677 2 : }
1678 2 : });
1679 :
1680 : pub(crate) struct DeletionQueueMetrics {
1681 : pub(crate) keys_submitted: IntCounter,
1682 : pub(crate) keys_dropped: IntCounter,
1683 : pub(crate) keys_executed: IntCounter,
1684 : pub(crate) keys_validated: IntCounter,
1685 : pub(crate) dropped_lsn_updates: IntCounter,
1686 : pub(crate) unexpected_errors: IntCounter,
1687 : pub(crate) remote_errors: IntCounterVec,
1688 : }
1689 22 : pub(crate) static DELETION_QUEUE: Lazy<DeletionQueueMetrics> = Lazy::new(|| {
1690 22 : DeletionQueueMetrics{
1691 22 :
1692 22 : keys_submitted: register_int_counter!(
1693 : "pageserver_deletion_queue_submitted_total",
1694 : "Number of objects submitted for deletion"
1695 22 : )
1696 22 : .expect("failed to define a metric"),
1697 22 :
1698 22 : keys_dropped: register_int_counter!(
1699 : "pageserver_deletion_queue_dropped_total",
1700 : "Number of object deletions dropped due to stale generation."
1701 22 : )
1702 22 : .expect("failed to define a metric"),
1703 22 :
1704 22 : keys_executed: register_int_counter!(
1705 : "pageserver_deletion_queue_executed_total",
1706 : "Number of objects deleted. Only includes objects that we actually deleted; sum with pageserver_deletion_queue_dropped_total for the total number of keys processed to completion"
1707 22 : )
1708 22 : .expect("failed to define a metric"),
1709 22 :
1710 22 : keys_validated: register_int_counter!(
1711 : "pageserver_deletion_queue_validated_total",
1712 : "Number of keys validated for deletion. Sum with pageserver_deletion_queue_dropped_total for the total number of keys that have passed through the validation stage."
1713 22 : )
1714 22 : .expect("failed to define a metric"),
1715 22 :
1716 22 : dropped_lsn_updates: register_int_counter!(
1717 : "pageserver_deletion_queue_dropped_lsn_updates_total",
1718 : "Updates to remote_consistent_lsn dropped due to stale generation number."
1719 22 : )
1720 22 : .expect("failed to define a metric"),
1721 22 : unexpected_errors: register_int_counter!(
1722 : "pageserver_deletion_queue_unexpected_errors_total",
1723 : "Number of unexpected conditions that may stall the queue: any value above zero is unexpected."
1724 22 : )
1725 22 : .expect("failed to define a metric"),
1726 22 : remote_errors: register_int_counter_vec!(
1727 : "pageserver_deletion_queue_remote_errors_total",
1728 : "Retryable remote I/O errors while executing deletions, for example 503 responses to DeleteObjects",
1729 : &["op_kind"],
1730 22 : )
1731 22 : .expect("failed to define a metric")
1732 22 : }
1733 22 : });
1734 :
1735 : pub(crate) struct SecondaryModeMetrics {
1736 : pub(crate) upload_heatmap: IntCounter,
1737 : pub(crate) upload_heatmap_errors: IntCounter,
1738 : pub(crate) upload_heatmap_duration: Histogram,
1739 : pub(crate) download_heatmap: IntCounter,
1740 : pub(crate) download_layer: IntCounter,
1741 : }
1742 0 : pub(crate) static SECONDARY_MODE: Lazy<SecondaryModeMetrics> = Lazy::new(|| {
1743 0 : SecondaryModeMetrics {
1744 0 : upload_heatmap: register_int_counter!(
1745 : "pageserver_secondary_upload_heatmap",
1746 : "Number of heatmaps written to remote storage by attached tenants"
1747 0 : )
1748 0 : .expect("failed to define a metric"),
1749 0 : upload_heatmap_errors: register_int_counter!(
1750 : "pageserver_secondary_upload_heatmap_errors",
1751 : "Failures writing heatmap to remote storage"
1752 0 : )
1753 0 : .expect("failed to define a metric"),
1754 0 : upload_heatmap_duration: register_histogram!(
1755 : "pageserver_secondary_upload_heatmap_duration",
1756 : "Time to build and upload a heatmap, including any waiting inside the S3 client"
1757 0 : )
1758 0 : .expect("failed to define a metric"),
1759 0 : download_heatmap: register_int_counter!(
1760 : "pageserver_secondary_download_heatmap",
1761 : "Number of heatmap downloads by secondary mode locations, including downloads where the heatmap hasn't changed"
1762 0 : )
1763 0 : .expect("failed to define a metric"),
1764 0 : download_layer: register_int_counter!(
1765 : "pageserver_secondary_download_layer",
1766 : "Number of downloads of layers by secondary mode locations"
1767 0 : )
1768 0 : .expect("failed to define a metric"),
1769 0 : }
1770 0 : });
1771 :
1772 0 : pub(crate) static SECONDARY_RESIDENT_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
1773 : register_uint_gauge_vec!(
1774 : "pageserver_secondary_resident_physical_size",
1775 : "The size of the layer files present in the pageserver's filesystem, for secondary locations.",
1776 : &["tenant_id", "shard_id"]
1777 : )
1778 0 : .expect("failed to define a metric")
1779 0 : });
1780 :
1781 : #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
1782 : pub enum RemoteOpKind {
1783 : Upload,
1784 : Download,
1785 : Delete,
1786 : }
1787 : impl RemoteOpKind {
1788 13148 : pub fn as_str(&self) -> &'static str {
1789 13148 : match self {
1790 12444 : Self::Upload => "upload",
1791 52 : Self::Download => "download",
1792 652 : Self::Delete => "delete",
1793 : }
1794 13148 : }
1795 : }
1796 :
1797 : #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
1798 : pub enum RemoteOpFileKind {
1799 : Layer,
1800 : Index,
1801 : }
1802 : impl RemoteOpFileKind {
1803 13148 : pub fn as_str(&self) -> &'static str {
1804 13148 : match self {
1805 8962 : Self::Layer => "layer",
1806 4186 : Self::Index => "index",
1807 : }
1808 13148 : }
1809 : }
1810 :
1811 140 : pub(crate) static REMOTE_OPERATION_TIME: Lazy<HistogramVec> = Lazy::new(|| {
1812 : register_histogram_vec!(
1813 : "pageserver_remote_operation_seconds",
1814 : "Time spent on remote storage operations. \
1815 : Grouped by file_kind, op_kind and status. \
1816 : Does not account for time spent waiting in remote timeline client's queues.",
1817 : &["file_kind", "op_kind", "status"]
1818 : )
1819 140 : .expect("failed to define a metric")
1820 140 : });
1821 :
1822 0 : pub(crate) static TENANT_TASK_EVENTS: Lazy<IntCounterVec> = Lazy::new(|| {
1823 : register_int_counter_vec!(
1824 : "pageserver_tenant_task_events",
1825 : "Number of task start/stop/fail events.",
1826 : &["event"],
1827 : )
1828 0 : .expect("Failed to register tenant_task_events metric")
1829 0 : });
1830 :
1831 20 : pub(crate) static BACKGROUND_LOOP_SEMAPHORE_WAIT_GAUGE: Lazy<IntCounterPairVec> = Lazy::new(|| {
1832 : register_int_counter_pair_vec!(
1833 : "pageserver_background_loop_semaphore_wait_start_count",
1834 : "Counter for background loop concurrency-limiting semaphore acquire calls started",
1835 : "pageserver_background_loop_semaphore_wait_finish_count",
1836 : "Counter for background loop concurrency-limiting semaphore acquire calls finished",
1837 : &["task"],
1838 : )
1839 20 : .unwrap()
1840 20 : });
1841 :
1842 0 : pub(crate) static BACKGROUND_LOOP_PERIOD_OVERRUN_COUNT: Lazy<IntCounterVec> = Lazy::new(|| {
1843 : register_int_counter_vec!(
1844 : "pageserver_background_loop_period_overrun_count",
1845 : "Incremented whenever warn_when_period_overrun() logs a warning.",
1846 : &["task", "period"],
1847 : )
1848 0 : .expect("failed to define a metric")
1849 0 : });
1850 :
1851 : // walreceiver metrics
1852 :
1853 0 : pub(crate) static WALRECEIVER_STARTED_CONNECTIONS: Lazy<IntCounter> = Lazy::new(|| {
1854 : register_int_counter!(
1855 : "pageserver_walreceiver_started_connections_total",
1856 : "Number of started walreceiver connections"
1857 : )
1858 0 : .expect("failed to define a metric")
1859 0 : });
1860 :
1861 0 : pub(crate) static WALRECEIVER_ACTIVE_MANAGERS: Lazy<IntGauge> = Lazy::new(|| {
1862 : register_int_gauge!(
1863 : "pageserver_walreceiver_active_managers",
1864 : "Number of active walreceiver managers"
1865 : )
1866 0 : .expect("failed to define a metric")
1867 0 : });
1868 :
1869 0 : pub(crate) static WALRECEIVER_SWITCHES: Lazy<IntCounterVec> = Lazy::new(|| {
1870 : register_int_counter_vec!(
1871 : "pageserver_walreceiver_switches_total",
1872 : "Number of walreceiver manager change_connection calls",
1873 : &["reason"]
1874 : )
1875 0 : .expect("failed to define a metric")
1876 0 : });
1877 :
1878 0 : pub(crate) static WALRECEIVER_BROKER_UPDATES: Lazy<IntCounter> = Lazy::new(|| {
1879 : register_int_counter!(
1880 : "pageserver_walreceiver_broker_updates_total",
1881 : "Number of received broker updates in walreceiver"
1882 : )
1883 0 : .expect("failed to define a metric")
1884 0 : });
1885 :
1886 2 : pub(crate) static WALRECEIVER_CANDIDATES_EVENTS: Lazy<IntCounterVec> = Lazy::new(|| {
1887 : register_int_counter_vec!(
1888 : "pageserver_walreceiver_candidates_events_total",
1889 : "Number of walreceiver candidate events",
1890 : &["event"]
1891 : )
1892 2 : .expect("failed to define a metric")
1893 2 : });
1894 :
1895 : pub(crate) static WALRECEIVER_CANDIDATES_ADDED: Lazy<IntCounter> =
1896 0 : Lazy::new(|| WALRECEIVER_CANDIDATES_EVENTS.with_label_values(&["add"]));
1897 :
1898 : pub(crate) static WALRECEIVER_CANDIDATES_REMOVED: Lazy<IntCounter> =
1899 2 : Lazy::new(|| WALRECEIVER_CANDIDATES_EVENTS.with_label_values(&["remove"]));
1900 :
1901 : // Metrics collected on WAL redo operations
1902 : //
1903 : // We collect the time spent in actual WAL redo ('redo') and the time spent waiting
1904 : // for access to the postgres process ('wait'), since there is only one such process
1905 : // per tenant.
1906 :
1907 : /// Time buckets are small because we want to be able to measure the
1908 : /// smallest redo processing times. These buckets allow us to measure down
1909 : /// to 5us, which equates to 200'000 pages/sec, which equates to 1.6GB/sec.
1910 : /// This is much better than the previous 5ms aka 200 pages/sec aka 1.6MB/sec.
1911 : ///
1912 : /// Values up to 1s are recorded because metrics show that we have redo
1913 : /// durations and lock times larger than 0.250s.
1914 : macro_rules! redo_histogram_time_buckets {
1915 : () => {
1916 : vec![
1917 : 0.000_005, 0.000_010, 0.000_025, 0.000_050, 0.000_100, 0.000_250, 0.000_500, 0.001_000,
1918 : 0.002_500, 0.005_000, 0.010_000, 0.025_000, 0.050_000, 0.100_000, 0.250_000, 0.500_000,
1919 : 1.000_000,
1920 : ]
1921 : };
1922 : }
1923 :
1924 : /// While we're at it, also measure the amount of records replayed in each
1925 : /// operation. We have a global 'total replayed' counter, but that's not
1926 : /// as useful as 'what is the skew for how many records we replay in one
1927 : /// operation'.
1928 : macro_rules! redo_histogram_count_buckets {
1929 : () => {
1930 : vec![0.0, 1.0, 2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0]
1931 : };
1932 : }
1933 :
1934 : macro_rules! redo_bytes_histogram_count_buckets {
1935 : () => {
1936 : // powers of (2^.5), from 2^4.5 to 2^15 (22 buckets)
1937 : // rounded up to the next multiple of 8 to capture any MAXALIGNed record of that size, too.
1938 : vec![
1939 : 24.0, 32.0, 48.0, 64.0, 96.0, 128.0, 184.0, 256.0, 368.0, 512.0, 728.0, 1024.0, 1456.0,
1940 : 2048.0, 2904.0, 4096.0, 5800.0, 8192.0, 11592.0, 16384.0, 23176.0, 32768.0,
1941 : ]
1942 : };
1943 : }
1944 :
1945 : pub(crate) struct WalIngestMetrics {
1946 : pub(crate) bytes_received: IntCounter,
1947 : pub(crate) records_received: IntCounter,
1948 : pub(crate) records_committed: IntCounter,
1949 : pub(crate) records_filtered: IntCounter,
1950 : }
1951 :
1952 2 : pub(crate) static WAL_INGEST: Lazy<WalIngestMetrics> = Lazy::new(|| WalIngestMetrics {
1953 : bytes_received: register_int_counter!(
1954 : "pageserver_wal_ingest_bytes_received",
1955 : "Bytes of WAL ingested from safekeepers",
1956 : )
1957 2 : .unwrap(),
1958 : records_received: register_int_counter!(
1959 : "pageserver_wal_ingest_records_received",
1960 : "Number of WAL records received from safekeepers"
1961 : )
1962 2 : .expect("failed to define a metric"),
1963 : records_committed: register_int_counter!(
1964 : "pageserver_wal_ingest_records_committed",
1965 : "Number of WAL records which resulted in writes to pageserver storage"
1966 : )
1967 2 : .expect("failed to define a metric"),
1968 : records_filtered: register_int_counter!(
1969 : "pageserver_wal_ingest_records_filtered",
1970 : "Number of WAL records filtered out due to sharding"
1971 : )
1972 2 : .expect("failed to define a metric"),
1973 2 : });
1974 :
1975 6 : pub(crate) static WAL_REDO_TIME: Lazy<Histogram> = Lazy::new(|| {
1976 : register_histogram!(
1977 : "pageserver_wal_redo_seconds",
1978 : "Time spent on WAL redo",
1979 : redo_histogram_time_buckets!()
1980 : )
1981 6 : .expect("failed to define a metric")
1982 6 : });
1983 :
1984 6 : pub(crate) static WAL_REDO_RECORDS_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
1985 : register_histogram!(
1986 : "pageserver_wal_redo_records_histogram",
1987 : "Histogram of number of records replayed per redo in the Postgres WAL redo process",
1988 : redo_histogram_count_buckets!(),
1989 : )
1990 6 : .expect("failed to define a metric")
1991 6 : });
1992 :
1993 6 : pub(crate) static WAL_REDO_BYTES_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
1994 : register_histogram!(
1995 : "pageserver_wal_redo_bytes_histogram",
1996 : "Histogram of number of bytes of WAL records sent to Postgres per redo",
1997 : redo_bytes_histogram_count_buckets!(),
1998 : )
1999 6 : .expect("failed to define a metric")
2000 6 : });
2001 :
2002 : // FIXME: isn't this already included by WAL_REDO_RECORDS_HISTOGRAM which has _count?
2003 6 : pub(crate) static WAL_REDO_RECORD_COUNTER: Lazy<IntCounter> = Lazy::new(|| {
2004 : register_int_counter!(
2005 : "pageserver_replayed_wal_records_total",
2006 : "Number of WAL records replayed in WAL redo process"
2007 : )
2008 6 : .unwrap()
2009 6 : });
2010 :
2011 : #[rustfmt::skip]
2012 6 : pub(crate) static WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
2013 : register_histogram!(
2014 : "pageserver_wal_redo_process_launch_duration",
2015 : "Histogram of the duration of successful WalRedoProcess::launch calls",
2016 : vec![
2017 : 0.0002, 0.0004, 0.0006, 0.0008, 0.0010,
2018 : 0.0020, 0.0040, 0.0060, 0.0080, 0.0100,
2019 : 0.0200, 0.0400, 0.0600, 0.0800, 0.1000,
2020 : 0.2000, 0.4000, 0.6000, 0.8000, 1.0000,
2021 : 1.5000, 2.0000, 2.5000, 3.0000, 4.0000, 10.0000
2022 : ],
2023 : )
2024 6 : .expect("failed to define a metric")
2025 6 : });
2026 :
2027 : pub(crate) struct WalRedoProcessCounters {
2028 : pub(crate) started: IntCounter,
2029 : pub(crate) killed_by_cause: enum_map::EnumMap<WalRedoKillCause, IntCounter>,
2030 : pub(crate) active_stderr_logger_tasks_started: IntCounter,
2031 : pub(crate) active_stderr_logger_tasks_finished: IntCounter,
2032 : }
2033 :
2034 18 : #[derive(Debug, enum_map::Enum, strum_macros::IntoStaticStr)]
2035 : pub(crate) enum WalRedoKillCause {
2036 : WalRedoProcessDrop,
2037 : NoLeakChildDrop,
2038 : Startup,
2039 : }
2040 :
2041 : impl Default for WalRedoProcessCounters {
2042 6 : fn default() -> Self {
2043 6 : let started = register_int_counter!(
2044 : "pageserver_wal_redo_process_started_total",
2045 : "Number of WAL redo processes started",
2046 : )
2047 6 : .unwrap();
2048 6 :
2049 6 : let killed = register_int_counter_vec!(
2050 : "pageserver_wal_redo_process_stopped_total",
2051 : "Number of WAL redo processes stopped",
2052 : &["cause"],
2053 : )
2054 6 : .unwrap();
2055 6 :
2056 6 : let active_stderr_logger_tasks_started = register_int_counter!(
2057 : "pageserver_walredo_stderr_logger_tasks_started_total",
2058 : "Number of active walredo stderr logger tasks that have started",
2059 : )
2060 6 : .unwrap();
2061 6 :
2062 6 : let active_stderr_logger_tasks_finished = register_int_counter!(
2063 : "pageserver_walredo_stderr_logger_tasks_finished_total",
2064 : "Number of active walredo stderr logger tasks that have finished",
2065 : )
2066 6 : .unwrap();
2067 6 :
2068 6 : Self {
2069 6 : started,
2070 18 : killed_by_cause: EnumMap::from_array(std::array::from_fn(|i| {
2071 18 : let cause = <WalRedoKillCause as enum_map::Enum>::from_usize(i);
2072 18 : let cause_str: &'static str = cause.into();
2073 18 : killed.with_label_values(&[cause_str])
2074 18 : })),
2075 6 : active_stderr_logger_tasks_started,
2076 6 : active_stderr_logger_tasks_finished,
2077 6 : }
2078 6 : }
2079 : }
2080 :
2081 : pub(crate) static WAL_REDO_PROCESS_COUNTERS: Lazy<WalRedoProcessCounters> =
2082 : Lazy::new(WalRedoProcessCounters::default);
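// Illustrative usage sketch: a walredo teardown path records why the process
// stopped via the pre-resolved per-cause counters, avoiding a label lookup.
#[allow(dead_code)]
fn example_record_walredo_stop(cause: WalRedoKillCause) {
    WAL_REDO_PROCESS_COUNTERS.killed_by_cause[cause].inc();
}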
2083 :
2084 : /// Similar to `prometheus::HistogramTimer` but does not record on drop.
2085 : pub(crate) struct StorageTimeMetricsTimer {
2086 : metrics: StorageTimeMetrics,
2087 : start: Instant,
2088 : }
2089 :
2090 : impl StorageTimeMetricsTimer {
2091 4625 : fn new(metrics: StorageTimeMetrics) -> Self {
2092 4625 : Self {
2093 4625 : metrics,
2094 4625 : start: Instant::now(),
2095 4625 : }
2096 4625 : }
2097 :
2098 : /// Record the time from creation to now.
2099 3513 : pub fn stop_and_record(self) {
2100 3513 : let duration = self.start.elapsed().as_secs_f64();
2101 3513 : self.metrics.timeline_sum.inc_by(duration);
2102 3513 : self.metrics.timeline_count.inc();
2103 3513 : self.metrics.global_histogram.observe(duration);
2104 3513 : }
2105 :
2106 : /// Turns this timer into one that will always record -- usually this means recording
2107 : /// regardless of whether an early `?` path was taken in a function.
2108 754 : pub(crate) fn record_on_drop(self) -> AlwaysRecordingStorageTimeMetricsTimer {
2109 754 : AlwaysRecordingStorageTimeMetricsTimer(Some(self))
2110 754 : }
2111 : }
2112 :
2113 : pub(crate) struct AlwaysRecordingStorageTimeMetricsTimer(Option<StorageTimeMetricsTimer>);
2114 :
2115 : impl Drop for AlwaysRecordingStorageTimeMetricsTimer {
2116 754 : fn drop(&mut self) {
2117 754 : if let Some(inner) = self.0.take() {
2118 754 : inner.stop_and_record();
2119 754 : }
2120 754 : }
2121 : }
2122 :
2123 : /// Timing facilities for a globally histogrammed metric, supplemented by per-tenant and
2124 : /// per-timeline total sum and count.
2125 : #[derive(Clone, Debug)]
2126 : pub(crate) struct StorageTimeMetrics {
2127 : /// Sum of f64 seconds, per operation, tenant_id and timeline_id
2128 : timeline_sum: Counter,
2129 : /// Number of operations, per operation, tenant_id and timeline_id
2130 : timeline_count: IntCounter,
2131 : /// Global histogram having only the "operation" label.
2132 : global_histogram: Histogram,
2133 : }
2134 :
2135 : impl StorageTimeMetrics {
2136 3120 : pub fn new(
2137 3120 : operation: StorageTimeOperation,
2138 3120 : tenant_id: &str,
2139 3120 : shard_id: &str,
2140 3120 : timeline_id: &str,
2141 3120 : ) -> Self {
2142 3120 : let operation: &'static str = operation.into();
2143 3120 :
2144 3120 : let timeline_sum = STORAGE_TIME_SUM_PER_TIMELINE
2145 3120 : .get_metric_with_label_values(&[operation, tenant_id, shard_id, timeline_id])
2146 3120 : .unwrap();
2147 3120 : let timeline_count = STORAGE_TIME_COUNT_PER_TIMELINE
2148 3120 : .get_metric_with_label_values(&[operation, tenant_id, shard_id, timeline_id])
2149 3120 : .unwrap();
2150 3120 : let global_histogram = STORAGE_TIME_GLOBAL
2151 3120 : .get_metric_with_label_values(&[operation])
2152 3120 : .unwrap();
2153 3120 :
2154 3120 : StorageTimeMetrics {
2155 3120 : timeline_sum,
2156 3120 : timeline_count,
2157 3120 : global_histogram,
2158 3120 : }
2159 3120 : }
2160 :
2161 : /// Starts timing a new operation.
2162 : ///
2163 : /// Note: unlike `prometheus::HistogramTimer` the returned timer does not record on drop.
2164 4625 : pub fn start_timer(&self) -> StorageTimeMetricsTimer {
2165 4625 : StorageTimeMetricsTimer::new(self.clone())
2166 4625 : }
2167 : }
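// Illustrative usage sketch: the two ways callers record a storage operation's
// duration with these metrics. The work being timed is assumed, not shown.
#[allow(dead_code)]
fn example_time_storage_operation(metrics: &StorageTimeMetrics) {
    // Explicit: only records if control flow reaches stop_and_record().
    let timer = metrics.start_timer();
    // ... do the operation ...
    timer.stop_and_record();

    // Always-recording: records on drop, even if an early `?` return is taken.
    let _timer = metrics.start_timer().record_on_drop();
    // ... do fallible work ...
}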
2168 :
2169 : #[derive(Debug)]
2170 : pub(crate) struct TimelineMetrics {
2171 : tenant_id: String,
2172 : shard_id: String,
2173 : timeline_id: String,
2174 : pub flush_time_histo: StorageTimeMetrics,
2175 : pub compact_time_histo: StorageTimeMetrics,
2176 : pub create_images_time_histo: StorageTimeMetrics,
2177 : pub logical_size_histo: StorageTimeMetrics,
2178 : pub imitate_logical_size_histo: StorageTimeMetrics,
2179 : pub load_layer_map_histo: StorageTimeMetrics,
2180 : pub garbage_collect_histo: StorageTimeMetrics,
2181 : pub find_gc_cutoffs_histo: StorageTimeMetrics,
2182 : pub last_record_gauge: IntGauge,
2183 : pub pitr_history_size: UIntGauge,
2184 : pub archival_size: UIntGauge,
2185 : pub(crate) layer_size_image: UIntGauge,
2186 : pub(crate) layer_count_image: UIntGauge,
2187 : pub(crate) layer_size_delta: UIntGauge,
2188 : pub(crate) layer_count_delta: UIntGauge,
2189 : pub standby_horizon_gauge: IntGauge,
2190 : pub resident_physical_size_gauge: UIntGauge,
2191 : /// copy of LayeredTimeline.current_logical_size
2192 : pub current_logical_size_gauge: UIntGauge,
2193 : pub aux_file_size_gauge: IntGauge,
2194 : pub directory_entries_count_gauge: Lazy<UIntGauge, Box<dyn Send + Fn() -> UIntGauge>>,
2195 : pub evictions: IntCounter,
2196 : pub evictions_with_low_residence_duration: std::sync::RwLock<EvictionsWithLowResidenceDuration>,
2197 : /// Number of valid LSN leases.
2198 : pub valid_lsn_lease_count_gauge: UIntGauge,
2199 : shutdown: std::sync::atomic::AtomicBool,
2200 : }
2201 :
2202 : impl TimelineMetrics {
2203 390 : pub fn new(
2204 390 : tenant_shard_id: &TenantShardId,
2205 390 : timeline_id_raw: &TimelineId,
2206 390 : evictions_with_low_residence_duration_builder: EvictionsWithLowResidenceDurationBuilder,
2207 390 : ) -> Self {
2208 390 : let tenant_id = tenant_shard_id.tenant_id.to_string();
2209 390 : let shard_id = format!("{}", tenant_shard_id.shard_slug());
2210 390 : let timeline_id = timeline_id_raw.to_string();
2211 390 : let flush_time_histo = StorageTimeMetrics::new(
2212 390 : StorageTimeOperation::LayerFlush,
2213 390 : &tenant_id,
2214 390 : &shard_id,
2215 390 : &timeline_id,
2216 390 : );
2217 390 : let compact_time_histo = StorageTimeMetrics::new(
2218 390 : StorageTimeOperation::Compact,
2219 390 : &tenant_id,
2220 390 : &shard_id,
2221 390 : &timeline_id,
2222 390 : );
2223 390 : let create_images_time_histo = StorageTimeMetrics::new(
2224 390 : StorageTimeOperation::CreateImages,
2225 390 : &tenant_id,
2226 390 : &shard_id,
2227 390 : &timeline_id,
2228 390 : );
2229 390 : let logical_size_histo = StorageTimeMetrics::new(
2230 390 : StorageTimeOperation::LogicalSize,
2231 390 : &tenant_id,
2232 390 : &shard_id,
2233 390 : &timeline_id,
2234 390 : );
2235 390 : let imitate_logical_size_histo = StorageTimeMetrics::new(
2236 390 : StorageTimeOperation::ImitateLogicalSize,
2237 390 : &tenant_id,
2238 390 : &shard_id,
2239 390 : &timeline_id,
2240 390 : );
2241 390 : let load_layer_map_histo = StorageTimeMetrics::new(
2242 390 : StorageTimeOperation::LoadLayerMap,
2243 390 : &tenant_id,
2244 390 : &shard_id,
2245 390 : &timeline_id,
2246 390 : );
2247 390 : let garbage_collect_histo = StorageTimeMetrics::new(
2248 390 : StorageTimeOperation::Gc,
2249 390 : &tenant_id,
2250 390 : &shard_id,
2251 390 : &timeline_id,
2252 390 : );
2253 390 : let find_gc_cutoffs_histo = StorageTimeMetrics::new(
2254 390 : StorageTimeOperation::FindGcCutoffs,
2255 390 : &tenant_id,
2256 390 : &shard_id,
2257 390 : &timeline_id,
2258 390 : );
2259 390 : let last_record_gauge = LAST_RECORD_LSN
2260 390 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2261 390 : .unwrap();
2262 390 :
2263 390 : let pitr_history_size = PITR_HISTORY_SIZE
2264 390 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2265 390 : .unwrap();
2266 390 :
2267 390 : let archival_size = TIMELINE_ARCHIVE_SIZE
2268 390 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2269 390 : .unwrap();
2270 390 :
2271 390 : let layer_size_image = TIMELINE_LAYER_SIZE
2272 390 : .get_metric_with_label_values(&[
2273 390 : &tenant_id,
2274 390 : &shard_id,
2275 390 : &timeline_id,
2276 390 : MetricLayerKind::Image.into(),
2277 390 : ])
2278 390 : .unwrap();
2279 390 :
2280 390 : let layer_count_image = TIMELINE_LAYER_COUNT
2281 390 : .get_metric_with_label_values(&[
2282 390 : &tenant_id,
2283 390 : &shard_id,
2284 390 : &timeline_id,
2285 390 : MetricLayerKind::Image.into(),
2286 390 : ])
2287 390 : .unwrap();
2288 390 :
2289 390 : let layer_size_delta = TIMELINE_LAYER_SIZE
2290 390 : .get_metric_with_label_values(&[
2291 390 : &tenant_id,
2292 390 : &shard_id,
2293 390 : &timeline_id,
2294 390 : MetricLayerKind::Delta.into(),
2295 390 : ])
2296 390 : .unwrap();
2297 390 :
2298 390 : let layer_count_delta = TIMELINE_LAYER_COUNT
2299 390 : .get_metric_with_label_values(&[
2300 390 : &tenant_id,
2301 390 : &shard_id,
2302 390 : &timeline_id,
2303 390 : MetricLayerKind::Delta.into(),
2304 390 : ])
2305 390 : .unwrap();
2306 390 :
2307 390 : let standby_horizon_gauge = STANDBY_HORIZON
2308 390 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2309 390 : .unwrap();
2310 390 : let resident_physical_size_gauge = RESIDENT_PHYSICAL_SIZE
2311 390 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2312 390 : .unwrap();
2313 390 : // TODO: we shouldn't expose this metric
2314 390 : let current_logical_size_gauge = CURRENT_LOGICAL_SIZE
2315 390 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2316 390 : .unwrap();
2317 390 : let aux_file_size_gauge = AUX_FILE_SIZE
2318 390 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2319 390 : .unwrap();
2320 390 : // TODO use impl Trait syntax here once we have the ability to use it: https://github.com/rust-lang/rust/issues/63065
2321 390 : let directory_entries_count_gauge_closure = {
2322 390 : let tenant_shard_id = *tenant_shard_id;
2323 390 : let timeline_id_raw = *timeline_id_raw;
2324 0 : move || {
2325 0 : let tenant_id = tenant_shard_id.tenant_id.to_string();
2326 0 : let shard_id = format!("{}", tenant_shard_id.shard_slug());
2327 0 : let timeline_id = timeline_id_raw.to_string();
2328 0 : let gauge: UIntGauge = DIRECTORY_ENTRIES_COUNT
2329 0 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2330 0 : .unwrap();
2331 0 : gauge
2332 0 : }
2333 : };
2334 390 : let directory_entries_count_gauge: Lazy<UIntGauge, Box<dyn Send + Fn() -> UIntGauge>> =
2335 390 : Lazy::new(Box::new(directory_entries_count_gauge_closure));
2336 390 : let evictions = EVICTIONS
2337 390 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2338 390 : .unwrap();
2339 390 : let evictions_with_low_residence_duration = evictions_with_low_residence_duration_builder
2340 390 : .build(&tenant_id, &shard_id, &timeline_id);
2341 390 :
2342 390 : let valid_lsn_lease_count_gauge = VALID_LSN_LEASE_COUNT
2343 390 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2344 390 : .unwrap();
2345 390 :
2346 390 : TimelineMetrics {
2347 390 : tenant_id,
2348 390 : shard_id,
2349 390 : timeline_id,
2350 390 : flush_time_histo,
2351 390 : compact_time_histo,
2352 390 : create_images_time_histo,
2353 390 : logical_size_histo,
2354 390 : imitate_logical_size_histo,
2355 390 : garbage_collect_histo,
2356 390 : find_gc_cutoffs_histo,
2357 390 : load_layer_map_histo,
2358 390 : last_record_gauge,
2359 390 : pitr_history_size,
2360 390 : archival_size,
2361 390 : layer_size_image,
2362 390 : layer_count_image,
2363 390 : layer_size_delta,
2364 390 : layer_count_delta,
2365 390 : standby_horizon_gauge,
2366 390 : resident_physical_size_gauge,
2367 390 : current_logical_size_gauge,
2368 390 : aux_file_size_gauge,
2369 390 : directory_entries_count_gauge,
2370 390 : evictions,
2371 390 : evictions_with_low_residence_duration: std::sync::RwLock::new(
2372 390 : evictions_with_low_residence_duration,
2373 390 : ),
2374 390 : valid_lsn_lease_count_gauge,
2375 390 : shutdown: std::sync::atomic::AtomicBool::default(),
2376 390 : }
2377 390 : }
2378 :
2379 1476 : pub(crate) fn record_new_file_metrics(&self, sz: u64) {
2380 1476 : self.resident_physical_size_add(sz);
2381 1476 : }
2382 :
2383 450 : pub(crate) fn resident_physical_size_sub(&self, sz: u64) {
2384 450 : self.resident_physical_size_gauge.sub(sz);
2385 450 : crate::metrics::RESIDENT_PHYSICAL_SIZE_GLOBAL.sub(sz);
2386 450 : }
2387 :
2388 1506 : pub(crate) fn resident_physical_size_add(&self, sz: u64) {
2389 1506 : self.resident_physical_size_gauge.add(sz);
2390 1506 : crate::metrics::RESIDENT_PHYSICAL_SIZE_GLOBAL.add(sz);
2391 1506 : }
2392 :
2393 8 : pub(crate) fn resident_physical_size_get(&self) -> u64 {
2394 8 : self.resident_physical_size_gauge.get()
2395 8 : }
2396 :
2397 8 : pub(crate) fn shutdown(&self) {
2398 8 : let was_shutdown = self
2399 8 : .shutdown
2400 8 : .swap(true, std::sync::atomic::Ordering::Relaxed);
2401 8 :
2402 8 : if was_shutdown {
2403 : // this happens on tenant deletion because tenant first shuts down timelines, then
2404 : // invokes timeline deletion which first shuts down the timeline again.
2405 : // TODO: this can be removed once https://github.com/neondatabase/neon/issues/5080 is fixed
2406 0 : return;
2407 8 : }
2408 8 :
2409 8 : let tenant_id = &self.tenant_id;
2410 8 : let timeline_id = &self.timeline_id;
2411 8 : let shard_id = &self.shard_id;
2412 8 : let _ = LAST_RECORD_LSN.remove_label_values(&[tenant_id, shard_id, timeline_id]);
2413 8 : let _ = STANDBY_HORIZON.remove_label_values(&[tenant_id, shard_id, timeline_id]);
2414 8 : {
2415 8 : RESIDENT_PHYSICAL_SIZE_GLOBAL.sub(self.resident_physical_size_get());
2416 8 : let _ = RESIDENT_PHYSICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
2417 8 : }
2418 8 : let _ = CURRENT_LOGICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
2419 8 : if let Some(metric) = Lazy::get(&DIRECTORY_ENTRIES_COUNT) {
2420 0 : let _ = metric.remove_label_values(&[tenant_id, shard_id, timeline_id]);
2421 8 : }
2422 :
2423 8 : let _ = TIMELINE_ARCHIVE_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
2424 8 : let _ = PITR_HISTORY_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
2425 8 :
2426 8 : let _ = TIMELINE_LAYER_SIZE.remove_label_values(&[
2427 8 : tenant_id,
2428 8 : shard_id,
2429 8 : timeline_id,
2430 8 : MetricLayerKind::Image.into(),
2431 8 : ]);
2432 8 : let _ = TIMELINE_LAYER_COUNT.remove_label_values(&[
2433 8 : tenant_id,
2434 8 : shard_id,
2435 8 : timeline_id,
2436 8 : MetricLayerKind::Image.into(),
2437 8 : ]);
2438 8 : let _ = TIMELINE_LAYER_SIZE.remove_label_values(&[
2439 8 : tenant_id,
2440 8 : shard_id,
2441 8 : timeline_id,
2442 8 : MetricLayerKind::Delta.into(),
2443 8 : ]);
2444 8 : let _ = TIMELINE_LAYER_COUNT.remove_label_values(&[
2445 8 : tenant_id,
2446 8 : shard_id,
2447 8 : timeline_id,
2448 8 : MetricLayerKind::Delta.into(),
2449 8 : ]);
2450 8 :
2451 8 : let _ = EVICTIONS.remove_label_values(&[tenant_id, shard_id, timeline_id]);
2452 8 : let _ = AUX_FILE_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
2453 8 : let _ = VALID_LSN_LEASE_COUNT.remove_label_values(&[tenant_id, shard_id, timeline_id]);
2454 8 :
2455 8 : self.evictions_with_low_residence_duration
2456 8 : .write()
2457 8 : .unwrap()
2458 8 : .remove(tenant_id, shard_id, timeline_id);
2459 :
2460 : // The following metrics are born outside of the TimelineMetrics lifecycle but still
2461 : // removed at the end of it. The idea is to have the metrics outlive the
2462 : // entity during which they're observed, e.g., the smgr metrics shall
2463 : // outlive an individual smgr connection, but not the timeline.
2464 :
2465 72 : for op in StorageTimeOperation::VARIANTS {
2466 64 : let _ = STORAGE_TIME_SUM_PER_TIMELINE.remove_label_values(&[
2467 64 : op,
2468 64 : tenant_id,
2469 64 : shard_id,
2470 64 : timeline_id,
2471 64 : ]);
2472 64 : let _ = STORAGE_TIME_COUNT_PER_TIMELINE.remove_label_values(&[
2473 64 : op,
2474 64 : tenant_id,
2475 64 : shard_id,
2476 64 : timeline_id,
2477 64 : ]);
2478 64 : }
2479 :
2480 24 : for op in STORAGE_IO_SIZE_OPERATIONS {
2481 16 : let _ = STORAGE_IO_SIZE.remove_label_values(&[op, tenant_id, shard_id, timeline_id]);
2482 16 : }
2483 :
2484 8 : let _ = SMGR_QUERY_TIME_PER_TENANT_TIMELINE.remove_label_values(&[
2485 8 : SmgrQueryType::GetPageAtLsn.into(),
2486 8 : tenant_id,
2487 8 : shard_id,
2488 8 : timeline_id,
2489 8 : ]);
2490 8 : }
2491 : }
2492 :
2493 6 : pub(crate) fn remove_tenant_metrics(tenant_shard_id: &TenantShardId) {
2494 6 : // Only shard zero deals in synthetic sizes
2495 6 : if tenant_shard_id.is_shard_zero() {
2496 6 : let tid = tenant_shard_id.tenant_id.to_string();
2497 6 : let _ = TENANT_SYNTHETIC_SIZE_METRIC.remove_label_values(&[&tid]);
2498 6 : }
2499 :
2500 : // we leave the BROKEN_TENANTS_SET entry if any
2501 6 : }
2502 :
2503 : use futures::Future;
2504 : use pin_project_lite::pin_project;
2505 : use std::collections::HashMap;
2506 : use std::num::NonZeroUsize;
2507 : use std::pin::Pin;
2508 : use std::sync::atomic::AtomicU64;
2509 : use std::sync::{Arc, Mutex};
2510 : use std::task::{Context, Poll};
2511 : use std::time::{Duration, Instant};
2512 :
2513 : use crate::context::{PageContentKind, RequestContext};
2514 : use crate::task_mgr::TaskKind;
2515 : use crate::tenant::mgr::TenantSlot;
2516 :
2517 : /// Maintain a per timeline gauge in addition to the global gauge.
2518 : pub(crate) struct PerTimelineRemotePhysicalSizeGauge {
2519 : last_set: AtomicU64,
2520 : gauge: UIntGauge,
2521 : }
2522 :
2523 : impl PerTimelineRemotePhysicalSizeGauge {
2524 400 : fn new(per_timeline_gauge: UIntGauge) -> Self {
2525 400 : Self {
2526 400 : last_set: AtomicU64::new(0),
2527 400 : gauge: per_timeline_gauge,
2528 400 : }
2529 400 : }
2530 1752 : pub(crate) fn set(&self, sz: u64) {
2531 1752 : self.gauge.set(sz);
2532 1752 : let prev = self.last_set.swap(sz, std::sync::atomic::Ordering::Relaxed);
2533 1752 : if sz < prev {
2534 26 : REMOTE_PHYSICAL_SIZE_GLOBAL.sub(prev - sz);
2535 1726 : } else {
2536 1726 : REMOTE_PHYSICAL_SIZE_GLOBAL.add(sz - prev);
2537 1726 : };
2538 1752 : }
2539 2 : pub(crate) fn get(&self) -> u64 {
2540 2 : self.gauge.get()
2541 2 : }
2542 : }
2543 :
2544 : impl Drop for PerTimelineRemotePhysicalSizeGauge {
2545 18 : fn drop(&mut self) {
2546 18 : REMOTE_PHYSICAL_SIZE_GLOBAL.sub(self.last_set.load(std::sync::atomic::Ordering::Relaxed));
2547 18 : }
2548 : }
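// Illustrative usage sketch: repeated set() calls apply only the delta to
// REMOTE_PHYSICAL_SIZE_GLOBAL, so the global gauge stays equal to the sum over
// all live timelines; Drop subtracts the last value. The sizes here are made up.
#[allow(dead_code)]
fn example_remote_physical_size(gauge: &PerTimelineRemotePhysicalSizeGauge) {
    gauge.set(1024); // global gauge += 1024
    gauge.set(512); // global gauge -= 512 (size shrank)
    assert_eq!(gauge.get(), 512);
}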
2549 :
2550 : pub(crate) struct RemoteTimelineClientMetrics {
2551 : tenant_id: String,
2552 : shard_id: String,
2553 : timeline_id: String,
2554 : pub(crate) remote_physical_size_gauge: PerTimelineRemotePhysicalSizeGauge,
2555 : calls: Mutex<HashMap<(&'static str, &'static str), IntCounterPair>>,
2556 : bytes_started_counter: Mutex<HashMap<(&'static str, &'static str), IntCounter>>,
2557 : bytes_finished_counter: Mutex<HashMap<(&'static str, &'static str), IntCounter>>,
2558 : }
2559 :
2560 : impl RemoteTimelineClientMetrics {
2561 400 : pub fn new(tenant_shard_id: &TenantShardId, timeline_id: &TimelineId) -> Self {
2562 400 : let tenant_id_str = tenant_shard_id.tenant_id.to_string();
2563 400 : let shard_id_str = format!("{}", tenant_shard_id.shard_slug());
2564 400 : let timeline_id_str = timeline_id.to_string();
2565 400 :
2566 400 : let remote_physical_size_gauge = PerTimelineRemotePhysicalSizeGauge::new(
2567 400 : REMOTE_PHYSICAL_SIZE
2568 400 : .get_metric_with_label_values(&[&tenant_id_str, &shard_id_str, &timeline_id_str])
2569 400 : .unwrap(),
2570 400 : );
2571 400 :
2572 400 : RemoteTimelineClientMetrics {
2573 400 : tenant_id: tenant_id_str,
2574 400 : shard_id: shard_id_str,
2575 400 : timeline_id: timeline_id_str,
2576 400 : calls: Mutex::new(HashMap::default()),
2577 400 : bytes_started_counter: Mutex::new(HashMap::default()),
2578 400 : bytes_finished_counter: Mutex::new(HashMap::default()),
2579 400 : remote_physical_size_gauge,
2580 400 : }
2581 400 : }
2582 :
2583 2666 : pub fn remote_operation_time(
2584 2666 : &self,
2585 2666 : file_kind: &RemoteOpFileKind,
2586 2666 : op_kind: &RemoteOpKind,
2587 2666 : status: &'static str,
2588 2666 : ) -> Histogram {
2589 2666 : let key = (file_kind.as_str(), op_kind.as_str(), status);
2590 2666 : REMOTE_OPERATION_TIME
2591 2666 : .get_metric_with_label_values(&[key.0, key.1, key.2])
2592 2666 : .unwrap()
2593 2666 : }
2594 :
2595 6224 : fn calls_counter_pair(
2596 6224 : &self,
2597 6224 : file_kind: &RemoteOpFileKind,
2598 6224 : op_kind: &RemoteOpKind,
2599 6224 : ) -> IntCounterPair {
2600 6224 : let mut guard = self.calls.lock().unwrap();
2601 6224 : let key = (file_kind.as_str(), op_kind.as_str());
2602 6224 : let metric = guard.entry(key).or_insert_with(move || {
2603 694 : REMOTE_TIMELINE_CLIENT_CALLS
2604 694 : .get_metric_with_label_values(&[
2605 694 : &self.tenant_id,
2606 694 : &self.shard_id,
2607 694 : &self.timeline_id,
2608 694 : key.0,
2609 694 : key.1,
2610 694 : ])
2611 694 : .unwrap()
2612 6224 : });
2613 6224 : metric.clone()
2614 6224 : }
2615 :
2616 1484 : fn bytes_started_counter(
2617 1484 : &self,
2618 1484 : file_kind: &RemoteOpFileKind,
2619 1484 : op_kind: &RemoteOpKind,
2620 1484 : ) -> IntCounter {
2621 1484 : let mut guard = self.bytes_started_counter.lock().unwrap();
2622 1484 : let key = (file_kind.as_str(), op_kind.as_str());
2623 1484 : let metric = guard.entry(key).or_insert_with(move || {
2624 264 : REMOTE_TIMELINE_CLIENT_BYTES_STARTED_COUNTER
2625 264 : .get_metric_with_label_values(&[
2626 264 : &self.tenant_id,
2627 264 : &self.shard_id,
2628 264 : &self.timeline_id,
2629 264 : key.0,
2630 264 : key.1,
2631 264 : ])
2632 264 : .unwrap()
2633 1484 : });
2634 1484 : metric.clone()
2635 1484 : }
2636 :
2637 2762 : fn bytes_finished_counter(
2638 2762 : &self,
2639 2762 : file_kind: &RemoteOpFileKind,
2640 2762 : op_kind: &RemoteOpKind,
2641 2762 : ) -> IntCounter {
2642 2762 : let mut guard = self.bytes_finished_counter.lock().unwrap();
2643 2762 : let key = (file_kind.as_str(), op_kind.as_str());
2644 2762 : let metric = guard.entry(key).or_insert_with(move || {
2645 264 : REMOTE_TIMELINE_CLIENT_BYTES_FINISHED_COUNTER
2646 264 : .get_metric_with_label_values(&[
2647 264 : &self.tenant_id,
2648 264 : &self.shard_id,
2649 264 : &self.timeline_id,
2650 264 : key.0,
2651 264 : key.1,
2652 264 : ])
2653 264 : .unwrap()
2654 2762 : });
2655 2762 : metric.clone()
2656 2762 : }
2657 : }
2658 :
2659 : #[cfg(test)]
2660 : impl RemoteTimelineClientMetrics {
2661 6 : pub fn get_bytes_started_counter_value(
2662 6 : &self,
2663 6 : file_kind: &RemoteOpFileKind,
2664 6 : op_kind: &RemoteOpKind,
2665 6 : ) -> Option<u64> {
2666 6 : let guard = self.bytes_started_counter.lock().unwrap();
2667 6 : let key = (file_kind.as_str(), op_kind.as_str());
2668 6 : guard.get(&key).map(|counter| counter.get())
2669 6 : }
2670 :
2671 6 : pub fn get_bytes_finished_counter_value(
2672 6 : &self,
2673 6 : file_kind: &RemoteOpFileKind,
2674 6 : op_kind: &RemoteOpKind,
2675 6 : ) -> Option<u64> {
2676 6 : let guard = self.bytes_finished_counter.lock().unwrap();
2677 6 : let key = (file_kind.as_str(), op_kind.as_str());
2678 6 : guard.get(&key).map(|counter| counter.get())
2679 6 : }
2680 : }
2681 :
2682 : /// See [`RemoteTimelineClientMetrics::call_begin`].
2683 : #[must_use]
2684 : pub(crate) struct RemoteTimelineClientCallMetricGuard {
2685 : /// Decremented on drop.
2686 : calls_counter_pair: Option<IntCounterPair>,
2687 : /// If Some(), this references the bytes_finished metric, and we increment it by the given `u64` on drop.
2688 : bytes_finished: Option<(IntCounter, u64)>,
2689 : }
2690 :
2691 : impl RemoteTimelineClientCallMetricGuard {
2692 : /// Consume this guard object without performing the metric updates it would do on `drop()`.
2693 : /// The caller vouches to do the metric updates manually.
2694 3342 : pub fn will_decrement_manually(mut self) {
2695 3342 : let RemoteTimelineClientCallMetricGuard {
2696 3342 : calls_counter_pair,
2697 3342 : bytes_finished,
2698 3342 : } = &mut self;
2699 3342 : calls_counter_pair.take();
2700 3342 : bytes_finished.take();
2701 3342 : }
2702 : }
2703 :
2704 : impl Drop for RemoteTimelineClientCallMetricGuard {
2705 3368 : fn drop(&mut self) {
2706 3368 : let RemoteTimelineClientCallMetricGuard {
2707 3368 : calls_counter_pair,
2708 3368 : bytes_finished,
2709 3368 : } = self;
2710 3368 : if let Some(guard) = calls_counter_pair.take() {
2711 26 : guard.dec();
2712 3342 : }
2713 3368 : if let Some((bytes_finished_metric, value)) = bytes_finished {
2714 0 : bytes_finished_metric.inc_by(*value);
2715 3368 : }
2716 3368 : }
2717 : }
2718 :
2719 : /// The enum variants communicate to the [`RemoteTimelineClientMetrics`] whether to
2720 : /// track the byte size of this call in applicable metric(s).
2721 : pub(crate) enum RemoteTimelineClientMetricsCallTrackSize {
2722 : /// Do not account for this call's byte size in any metrics.
2723 : /// The `reason` field is there to make the call sites self-documenting
2724 : /// about why they don't need the metric.
2725 : DontTrackSize { reason: &'static str },
2726 : /// Track the byte size of the call in applicable metric(s).
2727 : Bytes(u64),
2728 : }
2729 :
2730 : impl RemoteTimelineClientMetrics {
2731 : /// Update the metrics that change when a call to the remote timeline client instance starts.
2732 : ///
2733 : /// Drop the returned guard object once the operation is finished to update the corresponding metrics that track completions.
2734 : /// Or, use [`RemoteTimelineClientCallMetricGuard::will_decrement_manually`] and [`call_end`](Self::call_end) if that
2735 : /// is more suitable.
2736 : /// Never do both.
2737 3368 : pub(crate) fn call_begin(
2738 3368 : &self,
2739 3368 : file_kind: &RemoteOpFileKind,
2740 3368 : op_kind: &RemoteOpKind,
2741 3368 : size: RemoteTimelineClientMetricsCallTrackSize,
2742 3368 : ) -> RemoteTimelineClientCallMetricGuard {
2743 3368 : let calls_counter_pair = self.calls_counter_pair(file_kind, op_kind);
2744 3368 : calls_counter_pair.inc();
2745 :
2746 3368 : let bytes_finished = match size {
2747 1884 : RemoteTimelineClientMetricsCallTrackSize::DontTrackSize { reason: _reason } => {
2748 1884 : // nothing to do
2749 1884 : None
2750 : }
2751 1484 : RemoteTimelineClientMetricsCallTrackSize::Bytes(size) => {
2752 1484 : self.bytes_started_counter(file_kind, op_kind).inc_by(size);
2753 1484 : let finished_counter = self.bytes_finished_counter(file_kind, op_kind);
2754 1484 : Some((finished_counter, size))
2755 : }
2756 : };
2757 3368 : RemoteTimelineClientCallMetricGuard {
2758 3368 : calls_counter_pair: Some(calls_counter_pair),
2759 3368 : bytes_finished,
2760 3368 : }
2761 3368 : }
2762 :
2763 : /// Manually update the metrics that track completions, instead of using the guard object.
2764 : /// Using the guard object is generally preferable.
2765 : /// See [`call_begin`](Self::call_begin) for more context.
2766 2856 : pub(crate) fn call_end(
2767 2856 : &self,
2768 2856 : file_kind: &RemoteOpFileKind,
2769 2856 : op_kind: &RemoteOpKind,
2770 2856 : size: RemoteTimelineClientMetricsCallTrackSize,
2771 2856 : ) {
2772 2856 : let calls_counter_pair = self.calls_counter_pair(file_kind, op_kind);
2773 2856 : calls_counter_pair.dec();
2774 2856 : match size {
2775 1578 : RemoteTimelineClientMetricsCallTrackSize::DontTrackSize { reason: _reason } => {}
2776 1278 : RemoteTimelineClientMetricsCallTrackSize::Bytes(size) => {
2777 1278 : self.bytes_finished_counter(file_kind, op_kind).inc_by(size);
2778 1278 : }
2779 : }
2780 2856 : }
2781 : }
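// Illustrative usage sketch: the two supported completion-tracking styles. The
// byte size and the DontTrackSize reason are made up for the example.
#[allow(dead_code)]
fn example_call_tracking(metrics: &RemoteTimelineClientMetrics) {
    // Style 1: let the guard record completion when it is dropped.
    {
        let _guard = metrics.call_begin(
            &RemoteOpFileKind::Layer,
            &RemoteOpKind::Upload,
            RemoteTimelineClientMetricsCallTrackSize::Bytes(4096),
        );
        // ... perform the upload ...
    } // drop: decrements the calls pair and adds 4096 to bytes_finished

    // Style 2: completion happens elsewhere, so decrement manually via call_end().
    let guard = metrics.call_begin(
        &RemoteOpFileKind::Index,
        &RemoteOpKind::Upload,
        RemoteTimelineClientMetricsCallTrackSize::DontTrackSize { reason: "example" },
    );
    guard.will_decrement_manually();
    // ... later, when the operation completes:
    metrics.call_end(
        &RemoteOpFileKind::Index,
        &RemoteOpKind::Upload,
        RemoteTimelineClientMetricsCallTrackSize::DontTrackSize { reason: "example" },
    );
}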
2782 :
2783 : impl Drop for RemoteTimelineClientMetrics {
2784 18 : fn drop(&mut self) {
2785 18 : let RemoteTimelineClientMetrics {
2786 18 : tenant_id,
2787 18 : shard_id,
2788 18 : timeline_id,
2789 18 : remote_physical_size_gauge,
2790 18 : calls,
2791 18 : bytes_started_counter,
2792 18 : bytes_finished_counter,
2793 18 : } = self;
2794 24 : for ((a, b), _) in calls.get_mut().unwrap().drain() {
2795 24 : let mut res = [Ok(()), Ok(())];
2796 24 : REMOTE_TIMELINE_CLIENT_CALLS
2797 24 : .remove_label_values(&mut res, &[tenant_id, shard_id, timeline_id, a, b]);
2798 24 : // don't care about results
2799 24 : }
2800 18 : for ((a, b), _) in bytes_started_counter.get_mut().unwrap().drain() {
2801 6 : let _ = REMOTE_TIMELINE_CLIENT_BYTES_STARTED_COUNTER.remove_label_values(&[
2802 6 : tenant_id,
2803 6 : shard_id,
2804 6 : timeline_id,
2805 6 : a,
2806 6 : b,
2807 6 : ]);
2808 6 : }
2809 18 : for ((a, b), _) in bytes_finished_counter.get_mut().unwrap().drain() {
2810 6 : let _ = REMOTE_TIMELINE_CLIENT_BYTES_FINISHED_COUNTER.remove_label_values(&[
2811 6 : tenant_id,
2812 6 : shard_id,
2813 6 : timeline_id,
2814 6 : a,
2815 6 : b,
2816 6 : ]);
2817 6 : }
2818 18 : {
2819 18 : let _ = remote_physical_size_gauge; // use to avoid 'unused' warning in desctructuring above
2820 18 : let _ = remote_physical_size_gauge; // used to avoid an 'unused' warning from the destructuring above
2821 18 : }
2822 18 : }
2823 : }
2824 :
2825 : /// Wrapper future that measures the time spent by a remote storage operation,
2826 : /// and records the time and success/failure as a prometheus metric.
2827 : pub(crate) trait MeasureRemoteOp: Sized {
2828 2735 : fn measure_remote_op(
2829 2735 : self,
2830 2735 : file_kind: RemoteOpFileKind,
2831 2735 : op: RemoteOpKind,
2832 2735 : metrics: Arc<RemoteTimelineClientMetrics>,
2833 2735 : ) -> MeasuredRemoteOp<Self> {
2834 2735 : let start = Instant::now();
2835 2735 : MeasuredRemoteOp {
2836 2735 : inner: self,
2837 2735 : file_kind,
2838 2735 : op,
2839 2735 : start,
2840 2735 : metrics,
2841 2735 : }
2842 2735 : }
2843 : }
2844 :
2845 : impl<T: Sized> MeasureRemoteOp for T {}
2846 :
2847 : pin_project! {
2848 : pub(crate) struct MeasuredRemoteOp<F>
2849 : {
2850 : #[pin]
2851 : inner: F,
2852 : file_kind: RemoteOpFileKind,
2853 : op: RemoteOpKind,
2854 : start: Instant,
2855 : metrics: Arc<RemoteTimelineClientMetrics>,
2856 : }
2857 : }
2858 :
2859 : impl<F: Future<Output = Result<O, E>>, O, E> Future for MeasuredRemoteOp<F> {
2860 : type Output = Result<O, E>;
2861 :
2862 41515 : fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
2863 41515 : let this = self.project();
2864 41515 : let poll_result = this.inner.poll(cx);
2865 41515 : if let Poll::Ready(ref res) = poll_result {
2866 2666 : let duration = this.start.elapsed();
2867 2666 : let status = if res.is_ok() { &"success" } else { &"failure" };
2868 2666 : this.metrics
2869 2666 : .remote_operation_time(this.file_kind, this.op, status)
2870 2666 : .observe(duration.as_secs_f64());
2871 38849 : }
2872 41515 : poll_result
2873 41515 : }
2874 : }
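// Illustrative usage sketch: wrapping a remote-storage future so its duration
// and success/failure status are recorded in pageserver_remote_operation_seconds.
// The inner future is a stand-in for a real download call.
#[allow(dead_code)]
async fn example_measured_remote_op(
    metrics: Arc<RemoteTimelineClientMetrics>,
) -> Result<(), std::io::Error> {
    async { Ok::<(), std::io::Error>(()) }
        .measure_remote_op(RemoteOpFileKind::Layer, RemoteOpKind::Download, metrics)
        .await
}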
2875 :
2876 : pub mod tokio_epoll_uring {
2877 : use metrics::{register_int_counter, UIntGauge};
2878 : use once_cell::sync::Lazy;
2879 :
2880 : pub struct Collector {
2881 : descs: Vec<metrics::core::Desc>,
2882 : systems_created: UIntGauge,
2883 : systems_destroyed: UIntGauge,
2884 : }
2885 :
2886 : impl metrics::core::Collector for Collector {
2887 0 : fn desc(&self) -> Vec<&metrics::core::Desc> {
2888 0 : self.descs.iter().collect()
2889 0 : }
2890 :
2891 0 : fn collect(&self) -> Vec<metrics::proto::MetricFamily> {
2892 0 : let mut mfs = Vec::with_capacity(Self::NMETRICS);
2893 0 : let tokio_epoll_uring::metrics::Metrics {
2894 0 : systems_created,
2895 0 : systems_destroyed,
2896 0 : } = tokio_epoll_uring::metrics::global();
2897 0 : self.systems_created.set(systems_created);
2898 0 : mfs.extend(self.systems_created.collect());
2899 0 : self.systems_destroyed.set(systems_destroyed);
2900 0 : mfs.extend(self.systems_destroyed.collect());
2901 0 : mfs
2902 0 : }
2903 : }
2904 :
2905 : impl Collector {
2906 : const NMETRICS: usize = 2;
2907 :
2908 : #[allow(clippy::new_without_default)]
2909 0 : pub fn new() -> Self {
2910 0 : let mut descs = Vec::new();
2911 0 :
2912 0 : let systems_created = UIntGauge::new(
2913 0 : "pageserver_tokio_epoll_uring_systems_created",
2914 0 : "counter of tokio-epoll-uring systems that were created",
2915 0 : )
2916 0 : .unwrap();
2917 0 : descs.extend(
2918 0 : metrics::core::Collector::desc(&systems_created)
2919 0 : .into_iter()
2920 0 : .cloned(),
2921 0 : );
2922 0 :
2923 0 : let systems_destroyed = UIntGauge::new(
2924 0 : "pageserver_tokio_epoll_uring_systems_destroyed",
2925 0 : "counter of tokio-epoll-uring systems that were destroyed",
2926 0 : )
2927 0 : .unwrap();
2928 0 : descs.extend(
2929 0 : metrics::core::Collector::desc(&systems_destroyed)
2930 0 : .into_iter()
2931 0 : .cloned(),
2932 0 : );
2933 0 :
2934 0 : Self {
2935 0 : descs,
2936 0 : systems_created,
2937 0 : systems_destroyed,
2938 0 : }
2939 0 : }
2940 : }
2941 :
2942 87 : pub(crate) static THREAD_LOCAL_LAUNCH_SUCCESSES: Lazy<metrics::IntCounter> = Lazy::new(|| {
2943 : register_int_counter!(
2944 : "pageserver_tokio_epoll_uring_pageserver_thread_local_launch_success_count",
2945 : "Number of times thread_local_system creation spanned multiple executor threads",
2946 : )
2947 87 : .unwrap()
2948 87 : });
2949 :
2950 0 : pub(crate) static THREAD_LOCAL_LAUNCH_FAILURES: Lazy<metrics::IntCounter> = Lazy::new(|| {
2951 : register_int_counter!(
2952 : "pageserver_tokio_epoll_uring_pageserver_thread_local_launch_failures_count",
2953 : "Number of times thread_local_system creation failed and was retried after back-off.",
2954 : )
2955 0 : .unwrap()
2956 0 : });
2957 : }
2958 :
2959 : pub(crate) mod tenant_throttling {
2960 : use metrics::{register_int_counter_vec, IntCounter};
2961 : use once_cell::sync::Lazy;
2962 :
2963 : use crate::tenant::{self, throttle::Metric};
2964 :
2965 : pub(crate) struct TimelineGet {
2966 : wait_time: IntCounter,
2967 : count: IntCounter,
2968 : }
2969 :
2970 150 : pub(crate) static TIMELINE_GET: Lazy<TimelineGet> = Lazy::new(|| {
2971 150 : static WAIT_USECS: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
2972 150 : register_int_counter_vec!(
2973 150 : "pageserver_tenant_throttling_wait_usecs_sum_global",
2974 150 : "Sum of microseconds that tenants spent waiting for a tenant throttle of a given kind.",
2975 150 : &["kind"]
2976 150 : )
2977 150 : .unwrap()
2978 150 : });
2979 150 :
2980 150 : static WAIT_COUNT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
2981 150 : register_int_counter_vec!(
2982 150 : "pageserver_tenant_throttling_count_global",
2983 150 : "Count of tenant throttlings, by kind of throttle.",
2984 150 : &["kind"]
2985 150 : )
2986 150 : .unwrap()
2987 150 : });
2988 150 :
2989 150 : let kind = "timeline_get";
2990 150 : TimelineGet {
2991 150 : wait_time: WAIT_USECS.with_label_values(&[kind]),
2992 150 : count: WAIT_COUNT.with_label_values(&[kind]),
2993 150 : }
2994 150 : });
2995 :
2996 : impl Metric for &'static TimelineGet {
2997 : #[inline(always)]
2998 0 : fn observe_throttling(
2999 0 : &self,
3000 0 : tenant::throttle::Observation { wait_time }: &tenant::throttle::Observation,
3001 0 : ) {
3002 0 : let val = u64::try_from(wait_time.as_micros()).unwrap();
3003 0 : self.wait_time.inc_by(val);
3004 0 : self.count.inc();
3005 0 : }
3006 : }
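// Illustrative usage sketch: how a throttle would report one throttled wait to
// these counters. The 250us wait duration is made up for the example.
#[allow(dead_code)]
fn example_observe_throttling() {
    let observation = tenant::throttle::Observation {
        wait_time: std::time::Duration::from_micros(250),
    };
    let metric: &'static TimelineGet = &TIMELINE_GET;
    metric.observe_throttling(&observation);
}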
3007 : }
3008 :
3009 : pub(crate) mod disk_usage_based_eviction {
3010 : use super::*;
3011 :
3012 : pub(crate) struct Metrics {
3013 : pub(crate) tenant_collection_time: Histogram,
3014 : pub(crate) tenant_layer_count: Histogram,
3015 : pub(crate) layers_collected: IntCounter,
3016 : pub(crate) layers_selected: IntCounter,
3017 : pub(crate) layers_evicted: IntCounter,
3018 : }
3019 :
3020 : impl Default for Metrics {
3021 0 : fn default() -> Self {
3022 0 : let tenant_collection_time = register_histogram!(
3023 : "pageserver_disk_usage_based_eviction_tenant_collection_seconds",
3024 : "Time spent collecting layers from a tenant -- not normalized by collected layer amount",
3025 : vec![0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1.0, 5.0, 10.0]
3026 : )
3027 0 : .unwrap();
3028 0 :
3029 0 : let tenant_layer_count = register_histogram!(
3030 : "pageserver_disk_usage_based_eviction_tenant_collected_layers",
3031 : "Amount of layers gathered from a tenant",
3032 : vec![5.0, 50.0, 500.0, 5000.0, 50000.0]
3033 : )
3034 0 : .unwrap();
3035 0 :
3036 0 : let layers_collected = register_int_counter!(
3037 : "pageserver_disk_usage_based_eviction_collected_layers_total",
3038 : "Amount of layers collected"
3039 : )
3040 0 : .unwrap();
3041 0 :
3042 0 : let layers_selected = register_int_counter!(
3043 : "pageserver_disk_usage_based_eviction_select_layers_total",
3044 : "Amount of layers selected"
3045 : )
3046 0 : .unwrap();
3047 0 :
3048 0 : let layers_evicted = register_int_counter!(
3049 : "pageserver_disk_usage_based_eviction_evicted_layers_total",
3050 : "Amount of layers successfully evicted"
3051 : )
3052 0 : .unwrap();
3053 0 :
3054 0 : Self {
3055 0 : tenant_collection_time,
3056 0 : tenant_layer_count,
3057 0 : layers_collected,
3058 0 : layers_selected,
3059 0 : layers_evicted,
3060 0 : }
3061 0 : }
3062 : }
3063 :
3064 : pub(crate) static METRICS: Lazy<Metrics> = Lazy::new(Metrics::default);
3065 : }
3066 :
3067 144 : static TOKIO_EXECUTOR_THREAD_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
3068 : register_uint_gauge_vec!(
3069 : "pageserver_tokio_executor_thread_configured_count",
3070 : "Total number of configured tokio executor threads in the process.
3071 : The `setup` label denotes whether we're running with multiple runtimes or a single runtime.",
3072 : &["setup"],
3073 : )
3074 144 : .unwrap()
3075 144 : });
3076 :
3077 144 : pub(crate) fn set_tokio_runtime_setup(setup: &str, num_threads: NonZeroUsize) {
3078 144 : static SERIALIZE: std::sync::Mutex<()> = std::sync::Mutex::new(());
3079 144 : let _guard = SERIALIZE.lock().unwrap();
3080 144 : TOKIO_EXECUTOR_THREAD_COUNT.reset();
3081 144 : TOKIO_EXECUTOR_THREAD_COUNT
3082 144 : .get_metric_with_label_values(&[setup])
3083 144 : .unwrap()
3084 144 : .set(u64::try_from(num_threads.get()).unwrap());
3085 144 : }
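// Illustrative usage sketch: runtime-setup code reports its configuration once
// at startup. The label value and thread count here are made up.
#[allow(dead_code)]
fn example_report_tokio_runtime_setup() {
    let threads = NonZeroUsize::new(8).unwrap();
    set_tokio_runtime_setup("multiple-runtimes", threads);
}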
3086 :
3087 0 : pub fn preinitialize_metrics() {
3088 0 : // Python tests need these, and on some of them we do alerting.
3089 0 : //
3090 0 : // FIXME(4813): make it so that we have no top level metrics as this fn will easily fall out of
3091 0 : // order:
3092 0 : // - global metrics reside in a Lazy<PageserverMetrics>
3093 0 : // - access via crate::metrics::PS_METRICS.some_metric.inc()
3094 0 : // - could move the statics into TimelineMetrics::new()?
3095 0 :
3096 0 : // counters
3097 0 : [
3098 0 : &UNEXPECTED_ONDEMAND_DOWNLOADS,
3099 0 : &WALRECEIVER_STARTED_CONNECTIONS,
3100 0 : &WALRECEIVER_BROKER_UPDATES,
3101 0 : &WALRECEIVER_CANDIDATES_ADDED,
3102 0 : &WALRECEIVER_CANDIDATES_REMOVED,
3103 0 : &tokio_epoll_uring::THREAD_LOCAL_LAUNCH_FAILURES,
3104 0 : &tokio_epoll_uring::THREAD_LOCAL_LAUNCH_SUCCESSES,
3105 0 : &REMOTE_ONDEMAND_DOWNLOADED_LAYERS,
3106 0 : &REMOTE_ONDEMAND_DOWNLOADED_BYTES,
3107 0 : ]
3108 0 : .into_iter()
3109 0 : .for_each(|c| {
3110 0 : Lazy::force(c);
3111 0 : });
3112 0 :
3113 0 : // Deletion queue stats
3114 0 : Lazy::force(&DELETION_QUEUE);
3115 0 :
3116 0 : // Tenant stats
3117 0 : Lazy::force(&TENANT);
3118 0 :
3119 0 : // Tenant manager stats
3120 0 : Lazy::force(&TENANT_MANAGER);
3121 0 :
3122 0 : Lazy::force(&crate::tenant::storage_layer::layer::LAYER_IMPL_METRICS);
3123 0 : Lazy::force(&disk_usage_based_eviction::METRICS);
3124 :
3125 0 : for state_name in pageserver_api::models::TenantState::VARIANTS {
3126 0 : // initialize the metric for all gauges, otherwise the time series might appear to show
3127 0 : // values from the last restart.
3128 0 : TENANT_STATE_METRIC.with_label_values(&[state_name]).set(0);
3129 0 : }
3130 :
3131 : // countervecs
3132 0 : [&BACKGROUND_LOOP_PERIOD_OVERRUN_COUNT]
3133 0 : .into_iter()
3134 0 : .for_each(|c| {
3135 0 : Lazy::force(c);
3136 0 : });
3137 0 :
3138 0 : // gauges
3139 0 : WALRECEIVER_ACTIVE_MANAGERS.get();
3140 0 :
3141 0 : // histograms
3142 0 : [
3143 0 : &READ_NUM_LAYERS_VISITED,
3144 0 : &VEC_READ_NUM_LAYERS_VISITED,
3145 0 : &WAIT_LSN_TIME,
3146 0 : &WAL_REDO_TIME,
3147 0 : &WAL_REDO_RECORDS_HISTOGRAM,
3148 0 : &WAL_REDO_BYTES_HISTOGRAM,
3149 0 : &WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM,
3150 0 : ]
3151 0 : .into_iter()
3152 0 : .for_each(|h| {
3153 0 : Lazy::force(h);
3154 0 : });
3155 0 :
3156 0 : // Custom
3157 0 : Lazy::force(&RECONSTRUCT_TIME);
3158 0 : Lazy::force(&tenant_throttling::TIMELINE_GET);
3159 0 : Lazy::force(&BASEBACKUP_QUERY_TIME);
3160 0 : Lazy::force(&COMPUTE_COMMANDS_COUNTERS);
3161 0 : }