Line data Source code
1 : use std::collections::HashMap;
2 : use std::num::NonZeroUsize;
3 : use std::os::fd::RawFd;
4 : use std::pin::Pin;
5 : use std::sync::atomic::AtomicU64;
6 : use std::sync::{Arc, Mutex};
7 : use std::task::{Context, Poll};
8 : use std::time::{Duration, Instant};
9 :
10 : use enum_map::{Enum as _, EnumMap};
11 : use futures::Future;
12 : use metrics::{
13 : Counter, CounterVec, Gauge, GaugeVec, Histogram, HistogramVec, IntCounter, IntCounterPair,
14 : IntCounterPairVec, IntCounterVec, IntGauge, IntGaugeVec, UIntGauge, UIntGaugeVec,
15 : register_counter_vec, register_gauge_vec, register_histogram, register_histogram_vec,
16 : register_int_counter, register_int_counter_pair_vec, register_int_counter_vec,
17 : register_int_gauge, register_int_gauge_vec, register_uint_gauge, register_uint_gauge_vec,
18 : };
19 : use once_cell::sync::Lazy;
20 : use pageserver_api::config::{
21 : PageServicePipeliningConfig, PageServicePipeliningConfigPipelined,
22 : PageServiceProtocolPipelinedExecutionStrategy,
23 : };
24 : use pageserver_api::models::InMemoryLayerInfo;
25 : use pageserver_api::shard::TenantShardId;
26 : use pin_project_lite::pin_project;
27 : use postgres_backend::{QueryError, is_expected_io_error};
28 : use pq_proto::framed::ConnectionError;
29 : use strum::{EnumCount, IntoEnumIterator as _, VariantNames};
30 : use strum_macros::{IntoStaticStr, VariantNames};
31 : use utils::id::TimelineId;
32 :
33 : use crate::config::PageServerConf;
34 : use crate::context::{PageContentKind, RequestContext};
35 : use crate::pgdatadir_mapping::DatadirModificationStats;
36 : use crate::task_mgr::TaskKind;
37 : use crate::tenant::Timeline;
38 : use crate::tenant::layer_map::LayerMap;
39 : use crate::tenant::mgr::TenantSlot;
40 : use crate::tenant::storage_layer::{InMemoryLayer, PersistentLayerDesc};
41 : use crate::tenant::tasks::BackgroundLoopKind;
42 : use crate::tenant::throttle::ThrottleResult;
43 :
44 : /// Prometheus histogram buckets (in seconds) for operations in the critical
45 : /// path. In other words, operations that directly affect that latency of user
46 : /// queries.
47 : ///
48 : /// The buckets capture the majority of latencies in the microsecond and
49 : /// millisecond range but also extend far enough up to distinguish "bad" from
50 : /// "really bad".
51 : const CRITICAL_OP_BUCKETS: &[f64] = &[
52 : 0.000_001, 0.000_010, 0.000_100, // 1 us, 10 us, 100 us
53 : 0.001_000, 0.010_000, 0.100_000, // 1 ms, 10 ms, 100 ms
54 : 1.0, 10.0, 100.0, // 1 s, 10 s, 100 s
55 : ];
56 :
57 : // Metrics collected on operations on the storage repository.
58 : #[derive(Debug, VariantNames, IntoStaticStr)]
59 : #[strum(serialize_all = "kebab_case")]
60 : pub(crate) enum StorageTimeOperation {
61 : #[strum(serialize = "layer flush")]
62 : LayerFlush,
63 :
64 : #[strum(serialize = "layer flush delay")]
65 : LayerFlushDelay,
66 :
67 : #[strum(serialize = "compact")]
68 : Compact,
69 :
70 : #[strum(serialize = "create images")]
71 : CreateImages,
72 :
73 : #[strum(serialize = "logical size")]
74 : LogicalSize,
75 :
76 : #[strum(serialize = "imitate logical size")]
77 : ImitateLogicalSize,
78 :
79 : #[strum(serialize = "load layer map")]
80 : LoadLayerMap,
81 :
82 : #[strum(serialize = "gc")]
83 : Gc,
84 :
85 : #[strum(serialize = "find gc cutoffs")]
86 : FindGcCutoffs,
87 : }
88 :
89 412 : pub(crate) static STORAGE_TIME_SUM_PER_TIMELINE: Lazy<CounterVec> = Lazy::new(|| {
90 412 : register_counter_vec!(
91 412 : "pageserver_storage_operations_seconds_sum",
92 412 : "Total time spent on storage operations with operation, tenant and timeline dimensions",
93 412 : &["operation", "tenant_id", "shard_id", "timeline_id"],
94 412 : )
95 412 : .expect("failed to define a metric")
96 412 : });
97 :
98 412 : pub(crate) static STORAGE_TIME_COUNT_PER_TIMELINE: Lazy<IntCounterVec> = Lazy::new(|| {
99 412 : register_int_counter_vec!(
100 412 : "pageserver_storage_operations_seconds_count",
101 412 : "Count of storage operations with operation, tenant and timeline dimensions",
102 412 : &["operation", "tenant_id", "shard_id", "timeline_id"],
103 412 : )
104 412 : .expect("failed to define a metric")
105 412 : });
106 :
107 : // Buckets for background operation duration in seconds, like compaction, GC, size calculation.
108 : const STORAGE_OP_BUCKETS: &[f64] = &[0.010, 0.100, 1.0, 10.0, 100.0, 1000.0];
109 :
110 412 : pub(crate) static STORAGE_TIME_GLOBAL: Lazy<HistogramVec> = Lazy::new(|| {
111 412 : register_histogram_vec!(
112 412 : "pageserver_storage_operations_seconds_global",
113 412 : "Time spent on storage operations",
114 412 : &["operation"],
115 412 : STORAGE_OP_BUCKETS.into(),
116 412 : )
117 412 : .expect("failed to define a metric")
118 412 : });
119 :
120 : /// Measures layers visited per read (i.e. read amplification).
121 : ///
122 : /// NB: for a batch, we count all visited layers towards each read. While the cost of layer visits
123 : /// are amortized across the batch, and some layers may not intersect with a given key, each visited
124 : /// layer contributes directly to the observed latency for every read in the batch, which is what we
125 : /// care about.
126 412 : pub(crate) static LAYERS_PER_READ: Lazy<HistogramVec> = Lazy::new(|| {
127 412 : register_histogram_vec!(
128 412 : "pageserver_layers_per_read",
129 412 : "Layers visited to serve a single read (read amplification). In a batch, all visited layers count towards every read.",
130 412 : &["tenant_id", "shard_id", "timeline_id"],
131 412 : // Low resolution to reduce cardinality.
132 412 : vec![4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0],
133 412 : )
134 412 : .expect("failed to define a metric")
135 412 : });
136 :
137 404 : pub(crate) static LAYERS_PER_READ_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
138 404 : register_histogram!(
139 404 : "pageserver_layers_per_read_global",
140 404 : "Layers visited to serve a single read (read amplification). In a batch, all visited layers count towards every read.",
141 404 : vec![1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0],
142 404 : )
143 404 : .expect("failed to define a metric")
144 404 : });
145 :
146 404 : pub(crate) static DELTAS_PER_READ_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
147 404 : // We expect this to be low because of Postgres checkpoints. Let's see if that holds.
148 404 : register_histogram!(
149 404 : "pageserver_deltas_per_read_global",
150 404 : "Number of delta pages applied to image page per read",
151 404 : vec![0.0, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0],
152 404 : )
153 404 : .expect("failed to define a metric")
154 404 : });
155 :
156 0 : pub(crate) static CONCURRENT_INITDBS: Lazy<UIntGauge> = Lazy::new(|| {
157 0 : register_uint_gauge!(
158 0 : "pageserver_concurrent_initdb",
159 0 : "Number of initdb processes running"
160 0 : )
161 0 : .expect("failed to define a metric")
162 0 : });
163 :
164 0 : pub(crate) static INITDB_SEMAPHORE_ACQUISITION_TIME: Lazy<Histogram> = Lazy::new(|| {
165 0 : register_histogram!(
166 0 : "pageserver_initdb_semaphore_seconds_global",
167 0 : "Time spent getting a permit from the global initdb semaphore",
168 0 : STORAGE_OP_BUCKETS.into()
169 0 : )
170 0 : .expect("failed to define metric")
171 0 : });
172 :
173 0 : pub(crate) static INITDB_RUN_TIME: Lazy<Histogram> = Lazy::new(|| {
174 0 : register_histogram!(
175 0 : "pageserver_initdb_seconds_global",
176 0 : "Time spent performing initdb",
177 0 : STORAGE_OP_BUCKETS.into()
178 0 : )
179 0 : .expect("failed to define metric")
180 0 : });
181 :
182 : pub(crate) struct GetVectoredLatency {
183 : map: EnumMap<TaskKind, Option<Histogram>>,
184 : }
185 :
186 : #[allow(dead_code)]
187 : pub(crate) struct ScanLatency {
188 : map: EnumMap<TaskKind, Option<Histogram>>,
189 : }
190 :
191 : impl GetVectoredLatency {
192 : // Only these task types perform vectored gets. Filter all other tasks out to reduce total
193 : // cardinality of the metric.
194 : const TRACKED_TASK_KINDS: [TaskKind; 2] = [TaskKind::Compaction, TaskKind::PageRequestHandler];
195 :
196 39432 : pub(crate) fn for_task_kind(&self, task_kind: TaskKind) -> Option<&Histogram> {
197 39432 : self.map[task_kind].as_ref()
198 39432 : }
199 : }
200 :
201 : impl ScanLatency {
202 : // Only these task types perform vectored gets. Filter all other tasks out to reduce total
203 : // cardinality of the metric.
204 : const TRACKED_TASK_KINDS: [TaskKind; 1] = [TaskKind::PageRequestHandler];
205 :
206 24 : pub(crate) fn for_task_kind(&self, task_kind: TaskKind) -> Option<&Histogram> {
207 24 : self.map[task_kind].as_ref()
208 24 : }
209 : }
210 :
211 : pub(crate) struct ScanLatencyOngoingRecording<'a> {
212 : parent: &'a Histogram,
213 : start: std::time::Instant,
214 : }
215 :
216 : impl<'a> ScanLatencyOngoingRecording<'a> {
217 0 : pub(crate) fn start_recording(parent: &'a Histogram) -> ScanLatencyOngoingRecording<'a> {
218 0 : let start = Instant::now();
219 0 : ScanLatencyOngoingRecording { parent, start }
220 0 : }
221 :
222 0 : pub(crate) fn observe(self) {
223 0 : let elapsed = self.start.elapsed();
224 0 : self.parent.observe(elapsed.as_secs_f64());
225 0 : }
226 : }
227 :
228 396 : pub(crate) static GET_VECTORED_LATENCY: Lazy<GetVectoredLatency> = Lazy::new(|| {
229 396 : let inner = register_histogram_vec!(
230 396 : "pageserver_get_vectored_seconds",
231 396 : "Time spent in get_vectored.",
232 396 : &["task_kind"],
233 396 : CRITICAL_OP_BUCKETS.into(),
234 396 : )
235 396 : .expect("failed to define a metric");
236 396 :
237 396 : GetVectoredLatency {
238 12276 : map: EnumMap::from_array(std::array::from_fn(|task_kind_idx| {
239 12276 : let task_kind = TaskKind::from_usize(task_kind_idx);
240 12276 :
241 12276 : if GetVectoredLatency::TRACKED_TASK_KINDS.contains(&task_kind) {
242 792 : let task_kind = task_kind.into();
243 792 : Some(inner.with_label_values(&[task_kind]))
244 : } else {
245 11484 : None
246 : }
247 12276 : })),
248 396 : }
249 396 : });
250 :
251 8 : pub(crate) static SCAN_LATENCY: Lazy<ScanLatency> = Lazy::new(|| {
252 8 : let inner = register_histogram_vec!(
253 8 : "pageserver_scan_seconds",
254 8 : "Time spent in scan.",
255 8 : &["task_kind"],
256 8 : CRITICAL_OP_BUCKETS.into(),
257 8 : )
258 8 : .expect("failed to define a metric");
259 8 :
260 8 : ScanLatency {
261 248 : map: EnumMap::from_array(std::array::from_fn(|task_kind_idx| {
262 248 : let task_kind = TaskKind::from_usize(task_kind_idx);
263 248 :
264 248 : if ScanLatency::TRACKED_TASK_KINDS.contains(&task_kind) {
265 8 : let task_kind = task_kind.into();
266 8 : Some(inner.with_label_values(&[task_kind]))
267 : } else {
268 240 : None
269 : }
270 248 : })),
271 8 : }
272 8 : });
273 :
274 : pub(crate) struct PageCacheMetricsForTaskKind {
275 : pub read_accesses_immutable: IntCounter,
276 : pub read_hits_immutable: IntCounter,
277 : }
278 :
279 : pub(crate) struct PageCacheMetrics {
280 : map: EnumMap<TaskKind, EnumMap<PageContentKind, PageCacheMetricsForTaskKind>>,
281 : }
282 :
283 188 : static PAGE_CACHE_READ_HITS: Lazy<IntCounterVec> = Lazy::new(|| {
284 188 : register_int_counter_vec!(
285 188 : "pageserver_page_cache_read_hits_total",
286 188 : "Number of read accesses to the page cache that hit",
287 188 : &["task_kind", "key_kind", "content_kind", "hit_kind"]
288 188 : )
289 188 : .expect("failed to define a metric")
290 188 : });
291 :
292 188 : static PAGE_CACHE_READ_ACCESSES: Lazy<IntCounterVec> = Lazy::new(|| {
293 188 : register_int_counter_vec!(
294 188 : "pageserver_page_cache_read_accesses_total",
295 188 : "Number of read accesses to the page cache",
296 188 : &["task_kind", "key_kind", "content_kind"]
297 188 : )
298 188 : .expect("failed to define a metric")
299 188 : });
300 :
301 188 : pub(crate) static PAGE_CACHE: Lazy<PageCacheMetrics> = Lazy::new(|| PageCacheMetrics {
302 5828 : map: EnumMap::from_array(std::array::from_fn(|task_kind| {
303 5828 : let task_kind = TaskKind::from_usize(task_kind);
304 5828 : let task_kind: &'static str = task_kind.into();
305 46624 : EnumMap::from_array(std::array::from_fn(|content_kind| {
306 46624 : let content_kind = PageContentKind::from_usize(content_kind);
307 46624 : let content_kind: &'static str = content_kind.into();
308 46624 : PageCacheMetricsForTaskKind {
309 46624 : read_accesses_immutable: {
310 46624 : PAGE_CACHE_READ_ACCESSES
311 46624 : .get_metric_with_label_values(&[task_kind, "immutable", content_kind])
312 46624 : .unwrap()
313 46624 : },
314 46624 :
315 46624 : read_hits_immutable: {
316 46624 : PAGE_CACHE_READ_HITS
317 46624 : .get_metric_with_label_values(&[task_kind, "immutable", content_kind, "-"])
318 46624 : .unwrap()
319 46624 : },
320 46624 : }
321 46624 : }))
322 5828 : })),
323 188 : });
324 :
325 : impl PageCacheMetrics {
326 1948032 : pub(crate) fn for_ctx(&self, ctx: &RequestContext) -> &PageCacheMetricsForTaskKind {
327 1948032 : &self.map[ctx.task_kind()][ctx.page_content_kind()]
328 1948032 : }
329 : }
330 :
331 : pub(crate) struct PageCacheSizeMetrics {
332 : pub max_bytes: UIntGauge,
333 :
334 : pub current_bytes_immutable: UIntGauge,
335 : }
336 :
337 188 : static PAGE_CACHE_SIZE_CURRENT_BYTES: Lazy<UIntGaugeVec> = Lazy::new(|| {
338 188 : register_uint_gauge_vec!(
339 188 : "pageserver_page_cache_size_current_bytes",
340 188 : "Current size of the page cache in bytes, by key kind",
341 188 : &["key_kind"]
342 188 : )
343 188 : .expect("failed to define a metric")
344 188 : });
345 :
346 : pub(crate) static PAGE_CACHE_SIZE: Lazy<PageCacheSizeMetrics> =
347 188 : Lazy::new(|| PageCacheSizeMetrics {
348 188 : max_bytes: {
349 188 : register_uint_gauge!(
350 188 : "pageserver_page_cache_size_max_bytes",
351 188 : "Maximum size of the page cache in bytes"
352 188 : )
353 188 : .expect("failed to define a metric")
354 188 : },
355 188 : current_bytes_immutable: {
356 188 : PAGE_CACHE_SIZE_CURRENT_BYTES
357 188 : .get_metric_with_label_values(&["immutable"])
358 188 : .unwrap()
359 188 : },
360 188 : });
361 :
362 : pub(crate) mod page_cache_eviction_metrics {
363 : use std::num::NonZeroUsize;
364 :
365 : use metrics::{IntCounter, IntCounterVec, register_int_counter_vec};
366 : use once_cell::sync::Lazy;
367 :
368 : #[derive(Clone, Copy)]
369 : pub(crate) enum Outcome {
370 : FoundSlotUnused { iters: NonZeroUsize },
371 : FoundSlotEvicted { iters: NonZeroUsize },
372 : ItersExceeded { iters: NonZeroUsize },
373 : }
374 :
375 188 : static ITERS_TOTAL_VEC: Lazy<IntCounterVec> = Lazy::new(|| {
376 188 : register_int_counter_vec!(
377 188 : "pageserver_page_cache_find_victim_iters_total",
378 188 : "Counter for the number of iterations in the find_victim loop",
379 188 : &["outcome"],
380 188 : )
381 188 : .expect("failed to define a metric")
382 188 : });
383 :
384 188 : static CALLS_VEC: Lazy<IntCounterVec> = Lazy::new(|| {
385 188 : register_int_counter_vec!(
386 188 : "pageserver_page_cache_find_victim_calls",
387 188 : "Incremented at the end of each find_victim() call.\
388 188 : Filter by outcome to get e.g., eviction rate.",
389 188 : &["outcome"]
390 188 : )
391 188 : .unwrap()
392 188 : });
393 :
394 63578 : pub(crate) fn observe(outcome: Outcome) {
395 : macro_rules! dry {
396 : ($label:literal, $iters:expr) => {{
397 : static LABEL: &'static str = $label;
398 : static ITERS_TOTAL: Lazy<IntCounter> =
399 228 : Lazy::new(|| ITERS_TOTAL_VEC.with_label_values(&[LABEL]));
400 : static CALLS: Lazy<IntCounter> =
401 228 : Lazy::new(|| CALLS_VEC.with_label_values(&[LABEL]));
402 : ITERS_TOTAL.inc_by(($iters.get()) as u64);
403 : CALLS.inc();
404 : }};
405 : }
406 63578 : match outcome {
407 3288 : Outcome::FoundSlotUnused { iters } => dry!("found_empty", iters),
408 60290 : Outcome::FoundSlotEvicted { iters } => {
409 60290 : dry!("found_evicted", iters)
410 : }
411 0 : Outcome::ItersExceeded { iters } => {
412 0 : dry!("err_iters_exceeded", iters);
413 0 : super::page_cache_errors_inc(super::PageCacheErrorKind::EvictIterLimit);
414 0 : }
415 : }
416 63578 : }
417 : }
418 :
419 0 : static PAGE_CACHE_ERRORS: Lazy<IntCounterVec> = Lazy::new(|| {
420 0 : register_int_counter_vec!(
421 0 : "page_cache_errors_total",
422 0 : "Number of timeouts while acquiring a pinned slot in the page cache",
423 0 : &["error_kind"]
424 0 : )
425 0 : .expect("failed to define a metric")
426 0 : });
427 :
428 : #[derive(IntoStaticStr)]
429 : #[strum(serialize_all = "kebab_case")]
430 : pub(crate) enum PageCacheErrorKind {
431 : AcquirePinnedSlotTimeout,
432 : EvictIterLimit,
433 : }
434 :
435 0 : pub(crate) fn page_cache_errors_inc(error_kind: PageCacheErrorKind) {
436 0 : PAGE_CACHE_ERRORS
437 0 : .get_metric_with_label_values(&[error_kind.into()])
438 0 : .unwrap()
439 0 : .inc();
440 0 : }
441 :
442 40 : pub(crate) static WAIT_LSN_TIME: Lazy<Histogram> = Lazy::new(|| {
443 40 : register_histogram!(
444 40 : "pageserver_wait_lsn_seconds",
445 40 : "Time spent waiting for WAL to arrive",
446 40 : CRITICAL_OP_BUCKETS.into(),
447 40 : )
448 40 : .expect("failed to define a metric")
449 40 : });
450 :
451 412 : static FLUSH_WAIT_UPLOAD_TIME: Lazy<GaugeVec> = Lazy::new(|| {
452 412 : register_gauge_vec!(
453 412 : "pageserver_flush_wait_upload_seconds",
454 412 : "Time spent waiting for preceding uploads during layer flush",
455 412 : &["tenant_id", "shard_id", "timeline_id"]
456 412 : )
457 412 : .expect("failed to define a metric")
458 412 : });
459 :
460 412 : static LAST_RECORD_LSN: Lazy<IntGaugeVec> = Lazy::new(|| {
461 412 : register_int_gauge_vec!(
462 412 : "pageserver_last_record_lsn",
463 412 : "Last record LSN grouped by timeline",
464 412 : &["tenant_id", "shard_id", "timeline_id"]
465 412 : )
466 412 : .expect("failed to define a metric")
467 412 : });
468 :
469 412 : static DISK_CONSISTENT_LSN: Lazy<IntGaugeVec> = Lazy::new(|| {
470 412 : register_int_gauge_vec!(
471 412 : "pageserver_disk_consistent_lsn",
472 412 : "Disk consistent LSN grouped by timeline",
473 412 : &["tenant_id", "shard_id", "timeline_id"]
474 412 : )
475 412 : .expect("failed to define a metric")
476 412 : });
477 :
478 412 : pub(crate) static PROJECTED_REMOTE_CONSISTENT_LSN: Lazy<UIntGaugeVec> = Lazy::new(|| {
479 412 : register_uint_gauge_vec!(
480 412 : "pageserver_projected_remote_consistent_lsn",
481 412 : "Projected remote consistent LSN grouped by timeline",
482 412 : &["tenant_id", "shard_id", "timeline_id"]
483 412 : )
484 412 : .expect("failed to define a metric")
485 412 : });
486 :
487 412 : static PITR_HISTORY_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
488 412 : register_uint_gauge_vec!(
489 412 : "pageserver_pitr_history_size",
490 412 : "Data written since PITR cutoff on this timeline",
491 412 : &["tenant_id", "shard_id", "timeline_id"]
492 412 : )
493 412 : .expect("failed to define a metric")
494 412 : });
495 :
496 : #[derive(
497 240 : strum_macros::EnumIter,
498 0 : strum_macros::EnumString,
499 : strum_macros::Display,
500 : strum_macros::IntoStaticStr,
501 : )]
502 : #[strum(serialize_all = "kebab_case")]
503 : pub(crate) enum LayerKind {
504 : Delta,
505 : Image,
506 : }
507 :
508 : #[derive(
509 100 : strum_macros::EnumIter,
510 0 : strum_macros::EnumString,
511 : strum_macros::Display,
512 : strum_macros::IntoStaticStr,
513 : )]
514 : #[strum(serialize_all = "kebab_case")]
515 : pub(crate) enum LayerLevel {
516 : // We don't track the currently open ephemeral layer, since there's always exactly 1 and its
517 : // size changes. See `TIMELINE_EPHEMERAL_BYTES`.
518 : Frozen,
519 : L0,
520 : L1,
521 : }
522 :
523 404 : static TIMELINE_LAYER_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
524 404 : register_uint_gauge_vec!(
525 404 : "pageserver_layer_bytes",
526 404 : "Sum of frozen, L0, and L1 layer physical sizes in bytes (excluding the open ephemeral layer)",
527 404 : &["tenant_id", "shard_id", "timeline_id", "level", "kind"]
528 404 : )
529 404 : .expect("failed to define a metric")
530 404 : });
531 :
532 404 : static TIMELINE_LAYER_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
533 404 : register_uint_gauge_vec!(
534 404 : "pageserver_layer_count",
535 404 : "Number of frozen, L0, and L1 layers (excluding the open ephemeral layer)",
536 404 : &["tenant_id", "shard_id", "timeline_id", "level", "kind"]
537 404 : )
538 404 : .expect("failed to define a metric")
539 404 : });
540 :
541 412 : static TIMELINE_ARCHIVE_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
542 412 : register_uint_gauge_vec!(
543 412 : "pageserver_archive_size",
544 412 : "Timeline's logical size if it is considered eligible for archival (outside PITR window), else zero",
545 412 : &["tenant_id", "shard_id", "timeline_id"]
546 412 : )
547 412 : .expect("failed to define a metric")
548 412 : });
549 :
550 412 : static STANDBY_HORIZON: Lazy<IntGaugeVec> = Lazy::new(|| {
551 412 : register_int_gauge_vec!(
552 412 : "pageserver_standby_horizon",
553 412 : "Standby apply LSN for which GC is hold off, by timeline.",
554 412 : &["tenant_id", "shard_id", "timeline_id"]
555 412 : )
556 412 : .expect("failed to define a metric")
557 412 : });
558 :
559 412 : static RESIDENT_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
560 412 : register_uint_gauge_vec!(
561 412 : "pageserver_resident_physical_size",
562 412 : "The size of the layer files present in the pageserver's filesystem, for attached locations.",
563 412 : &["tenant_id", "shard_id", "timeline_id"]
564 412 : )
565 412 : .expect("failed to define a metric")
566 412 : });
567 :
568 412 : static VISIBLE_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
569 412 : register_uint_gauge_vec!(
570 412 : "pageserver_visible_physical_size",
571 412 : "The size of the layer files present in the pageserver's filesystem.",
572 412 : &["tenant_id", "shard_id", "timeline_id"]
573 412 : )
574 412 : .expect("failed to define a metric")
575 412 : });
576 :
577 404 : pub(crate) static RESIDENT_PHYSICAL_SIZE_GLOBAL: Lazy<UIntGauge> = Lazy::new(|| {
578 404 : register_uint_gauge!(
579 404 : "pageserver_resident_physical_size_global",
580 404 : "Like `pageserver_resident_physical_size`, but without tenant/timeline dimensions."
581 404 : )
582 404 : .expect("failed to define a metric")
583 404 : });
584 :
585 412 : static REMOTE_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
586 412 : register_uint_gauge_vec!(
587 412 : "pageserver_remote_physical_size",
588 412 : "The size of the layer files present in the remote storage that are listed in the remote index_part.json.",
589 412 : // Corollary: If any files are missing from the index part, they won't be included here.
590 412 : &["tenant_id", "shard_id", "timeline_id"]
591 412 : )
592 412 : .expect("failed to define a metric")
593 412 : });
594 :
595 412 : static REMOTE_PHYSICAL_SIZE_GLOBAL: Lazy<UIntGauge> = Lazy::new(|| {
596 412 : register_uint_gauge!(
597 412 : "pageserver_remote_physical_size_global",
598 412 : "Like `pageserver_remote_physical_size`, but without tenant/timeline dimensions."
599 412 : )
600 412 : .expect("failed to define a metric")
601 412 : });
602 :
603 12 : pub(crate) static REMOTE_ONDEMAND_DOWNLOADED_LAYERS: Lazy<IntCounter> = Lazy::new(|| {
604 12 : register_int_counter!(
605 12 : "pageserver_remote_ondemand_downloaded_layers_total",
606 12 : "Total on-demand downloaded layers"
607 12 : )
608 12 : .unwrap()
609 12 : });
610 :
611 12 : pub(crate) static REMOTE_ONDEMAND_DOWNLOADED_BYTES: Lazy<IntCounter> = Lazy::new(|| {
612 12 : register_int_counter!(
613 12 : "pageserver_remote_ondemand_downloaded_bytes_total",
614 12 : "Total bytes of layers on-demand downloaded",
615 12 : )
616 12 : .unwrap()
617 12 : });
618 :
619 412 : static CURRENT_LOGICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
620 412 : register_uint_gauge_vec!(
621 412 : "pageserver_current_logical_size",
622 412 : "Current logical size grouped by timeline",
623 412 : &["tenant_id", "shard_id", "timeline_id"]
624 412 : )
625 412 : .expect("failed to define current logical size metric")
626 412 : });
627 :
628 412 : static AUX_FILE_SIZE: Lazy<IntGaugeVec> = Lazy::new(|| {
629 412 : register_int_gauge_vec!(
630 412 : "pageserver_aux_file_estimated_size",
631 412 : "The size of all aux files for a timeline in aux file v2 store.",
632 412 : &["tenant_id", "shard_id", "timeline_id"]
633 412 : )
634 412 : .expect("failed to define a metric")
635 412 : });
636 :
637 412 : static VALID_LSN_LEASE_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
638 412 : register_uint_gauge_vec!(
639 412 : "pageserver_valid_lsn_lease_count",
640 412 : "The number of valid leases after refreshing gc info.",
641 412 : &["tenant_id", "shard_id", "timeline_id"],
642 412 : )
643 412 : .expect("failed to define a metric")
644 412 : });
645 :
646 0 : pub(crate) static CIRCUIT_BREAKERS_BROKEN: Lazy<IntCounter> = Lazy::new(|| {
647 0 : register_int_counter!(
648 0 : "pageserver_circuit_breaker_broken",
649 0 : "How many times a circuit breaker has broken"
650 0 : )
651 0 : .expect("failed to define a metric")
652 0 : });
653 :
654 0 : pub(crate) static CIRCUIT_BREAKERS_UNBROKEN: Lazy<IntCounter> = Lazy::new(|| {
655 0 : register_int_counter!(
656 0 : "pageserver_circuit_breaker_unbroken",
657 0 : "How many times a circuit breaker has been un-broken (recovered)"
658 0 : )
659 0 : .expect("failed to define a metric")
660 0 : });
661 :
662 396 : pub(crate) static COMPRESSION_IMAGE_INPUT_BYTES: Lazy<IntCounter> = Lazy::new(|| {
663 396 : register_int_counter!(
664 396 : "pageserver_compression_image_in_bytes_total",
665 396 : "Size of data written into image layers before compression"
666 396 : )
667 396 : .expect("failed to define a metric")
668 396 : });
669 :
670 396 : pub(crate) static COMPRESSION_IMAGE_INPUT_BYTES_CONSIDERED: Lazy<IntCounter> = Lazy::new(|| {
671 396 : register_int_counter!(
672 396 : "pageserver_compression_image_in_bytes_considered",
673 396 : "Size of potentially compressible data written into image layers before compression"
674 396 : )
675 396 : .expect("failed to define a metric")
676 396 : });
677 :
678 396 : pub(crate) static COMPRESSION_IMAGE_INPUT_BYTES_CHOSEN: Lazy<IntCounter> = Lazy::new(|| {
679 396 : register_int_counter!(
680 396 : "pageserver_compression_image_in_bytes_chosen",
681 396 : "Size of data whose compressed form was written into image layers"
682 396 : )
683 396 : .expect("failed to define a metric")
684 396 : });
685 :
686 396 : pub(crate) static COMPRESSION_IMAGE_OUTPUT_BYTES: Lazy<IntCounter> = Lazy::new(|| {
687 396 : register_int_counter!(
688 396 : "pageserver_compression_image_out_bytes_total",
689 396 : "Size of compressed image layer written"
690 396 : )
691 396 : .expect("failed to define a metric")
692 396 : });
693 :
694 20 : pub(crate) static RELSIZE_CACHE_ENTRIES: Lazy<UIntGauge> = Lazy::new(|| {
695 20 : register_uint_gauge!(
696 20 : "pageserver_relsize_cache_entries",
697 20 : "Number of entries in the relation size cache",
698 20 : )
699 20 : .expect("failed to define a metric")
700 20 : });
701 :
702 20 : pub(crate) static RELSIZE_CACHE_HITS: Lazy<IntCounter> = Lazy::new(|| {
703 20 : register_int_counter!("pageserver_relsize_cache_hits", "Relation size cache hits",)
704 20 : .expect("failed to define a metric")
705 20 : });
706 :
707 20 : pub(crate) static RELSIZE_CACHE_MISSES: Lazy<IntCounter> = Lazy::new(|| {
708 20 : register_int_counter!(
709 20 : "pageserver_relsize_cache_misses",
710 20 : "Relation size cache misses",
711 20 : )
712 20 : .expect("failed to define a metric")
713 20 : });
714 :
715 8 : pub(crate) static RELSIZE_CACHE_MISSES_OLD: Lazy<IntCounter> = Lazy::new(|| {
716 8 : register_int_counter!(
717 8 : "pageserver_relsize_cache_misses_old",
718 8 : "Relation size cache misses where the lookup LSN is older than the last relation update"
719 8 : )
720 8 : .expect("failed to define a metric")
721 8 : });
722 :
723 : pub(crate) mod initial_logical_size {
724 : use metrics::{IntCounter, IntCounterVec, register_int_counter, register_int_counter_vec};
725 : use once_cell::sync::Lazy;
726 :
727 : pub(crate) struct StartCalculation(IntCounterVec);
728 412 : pub(crate) static START_CALCULATION: Lazy<StartCalculation> = Lazy::new(|| {
729 412 : StartCalculation(
730 412 : register_int_counter_vec!(
731 412 : "pageserver_initial_logical_size_start_calculation",
732 412 : "Incremented each time we start an initial logical size calculation attempt. \
733 412 : The `circumstances` label provides some additional details.",
734 412 : &["attempt", "circumstances"]
735 412 : )
736 412 : .unwrap(),
737 412 : )
738 412 : });
739 :
740 : struct DropCalculation {
741 : first: IntCounter,
742 : retry: IntCounter,
743 : }
744 :
745 412 : static DROP_CALCULATION: Lazy<DropCalculation> = Lazy::new(|| {
746 412 : let vec = register_int_counter_vec!(
747 412 : "pageserver_initial_logical_size_drop_calculation",
748 412 : "Incremented each time we abort a started size calculation attmpt.",
749 412 : &["attempt"]
750 412 : )
751 412 : .unwrap();
752 412 : DropCalculation {
753 412 : first: vec.with_label_values(&["first"]),
754 412 : retry: vec.with_label_values(&["retry"]),
755 412 : }
756 412 : });
757 :
758 : pub(crate) struct Calculated {
759 : pub(crate) births: IntCounter,
760 : pub(crate) deaths: IntCounter,
761 : }
762 :
763 412 : pub(crate) static CALCULATED: Lazy<Calculated> = Lazy::new(|| Calculated {
764 412 : births: register_int_counter!(
765 412 : "pageserver_initial_logical_size_finish_calculation",
766 412 : "Incremented every time we finish calculation of initial logical size.\
767 412 : If everything is working well, this should happen at most once per Timeline object."
768 412 : )
769 412 : .unwrap(),
770 412 : deaths: register_int_counter!(
771 412 : "pageserver_initial_logical_size_drop_finished_calculation",
772 412 : "Incremented when we drop a finished initial logical size calculation result.\
773 412 : Mainly useful to turn pageserver_initial_logical_size_finish_calculation into a gauge."
774 412 : )
775 412 : .unwrap(),
776 412 : });
777 :
778 : pub(crate) struct OngoingCalculationGuard {
779 : inc_drop_calculation: Option<IntCounter>,
780 : }
781 :
782 : #[derive(strum_macros::IntoStaticStr)]
783 : pub(crate) enum StartCircumstances {
784 : EmptyInitial,
785 : SkippedConcurrencyLimiter,
786 : AfterBackgroundTasksRateLimit,
787 : }
788 :
789 : impl StartCalculation {
790 436 : pub(crate) fn first(&self, circumstances: StartCircumstances) -> OngoingCalculationGuard {
791 436 : let circumstances_label: &'static str = circumstances.into();
792 436 : self.0
793 436 : .with_label_values(&["first", circumstances_label])
794 436 : .inc();
795 436 : OngoingCalculationGuard {
796 436 : inc_drop_calculation: Some(DROP_CALCULATION.first.clone()),
797 436 : }
798 436 : }
799 0 : pub(crate) fn retry(&self, circumstances: StartCircumstances) -> OngoingCalculationGuard {
800 0 : let circumstances_label: &'static str = circumstances.into();
801 0 : self.0
802 0 : .with_label_values(&["retry", circumstances_label])
803 0 : .inc();
804 0 : OngoingCalculationGuard {
805 0 : inc_drop_calculation: Some(DROP_CALCULATION.retry.clone()),
806 0 : }
807 0 : }
808 : }
809 :
810 : impl Drop for OngoingCalculationGuard {
811 436 : fn drop(&mut self) {
812 436 : if let Some(counter) = self.inc_drop_calculation.take() {
813 0 : counter.inc();
814 436 : }
815 436 : }
816 : }
817 :
818 : impl OngoingCalculationGuard {
819 436 : pub(crate) fn calculation_result_saved(mut self) -> FinishedCalculationGuard {
820 436 : drop(self.inc_drop_calculation.take());
821 436 : CALCULATED.births.inc();
822 436 : FinishedCalculationGuard {
823 436 : inc_on_drop: CALCULATED.deaths.clone(),
824 436 : }
825 436 : }
826 : }
827 :
828 : pub(crate) struct FinishedCalculationGuard {
829 : inc_on_drop: IntCounter,
830 : }
831 :
832 : impl Drop for FinishedCalculationGuard {
833 12 : fn drop(&mut self) {
834 12 : self.inc_on_drop.inc();
835 12 : }
836 : }
837 :
838 : // context: https://github.com/neondatabase/neon/issues/5963
839 : pub(crate) static TIMELINES_WHERE_WALRECEIVER_GOT_APPROXIMATE_SIZE: Lazy<IntCounter> =
840 0 : Lazy::new(|| {
841 0 : register_int_counter!(
842 0 : "pageserver_initial_logical_size_timelines_where_walreceiver_got_approximate_size",
843 0 : "Counter for the following event: walreceiver calls\
844 0 : Timeline::get_current_logical_size() and it returns `Approximate` for the first time."
845 0 : )
846 0 : .unwrap()
847 0 : });
848 : }
849 :
850 0 : static DIRECTORY_ENTRIES_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
851 0 : register_uint_gauge_vec!(
852 0 : "pageserver_directory_entries_count",
853 0 : "Sum of the entries in pageserver-stored directory listings",
854 0 : &["tenant_id", "shard_id", "timeline_id"]
855 0 : )
856 0 : .expect("failed to define a metric")
857 0 : });
858 :
859 416 : pub(crate) static TENANT_STATE_METRIC: Lazy<UIntGaugeVec> = Lazy::new(|| {
860 416 : register_uint_gauge_vec!(
861 416 : "pageserver_tenant_states_count",
862 416 : "Count of tenants per state",
863 416 : &["state"]
864 416 : )
865 416 : .expect("Failed to register pageserver_tenant_states_count metric")
866 416 : });
867 :
868 : /// A set of broken tenants.
869 : ///
870 : /// These are expected to be so rare that a set is fine. Set as in a new timeseries per each broken
871 : /// tenant.
872 20 : pub(crate) static BROKEN_TENANTS_SET: Lazy<UIntGaugeVec> = Lazy::new(|| {
873 20 : register_uint_gauge_vec!(
874 20 : "pageserver_broken_tenants_count",
875 20 : "Set of broken tenants",
876 20 : &["tenant_id", "shard_id"]
877 20 : )
878 20 : .expect("Failed to register pageserver_tenant_states_count metric")
879 20 : });
880 :
881 12 : pub(crate) static TENANT_SYNTHETIC_SIZE_METRIC: Lazy<UIntGaugeVec> = Lazy::new(|| {
882 12 : register_uint_gauge_vec!(
883 12 : "pageserver_tenant_synthetic_cached_size_bytes",
884 12 : "Synthetic size of each tenant in bytes",
885 12 : &["tenant_id"]
886 12 : )
887 12 : .expect("Failed to register pageserver_tenant_synthetic_cached_size_bytes metric")
888 12 : });
889 :
890 0 : pub(crate) static EVICTION_ITERATION_DURATION: Lazy<HistogramVec> = Lazy::new(|| {
891 0 : register_histogram_vec!(
892 0 : "pageserver_eviction_iteration_duration_seconds_global",
893 0 : "Time spent on a single eviction iteration",
894 0 : &["period_secs", "threshold_secs"],
895 0 : STORAGE_OP_BUCKETS.into(),
896 0 : )
897 0 : .expect("failed to define a metric")
898 0 : });
899 :
900 412 : static EVICTIONS: Lazy<IntCounterVec> = Lazy::new(|| {
901 412 : register_int_counter_vec!(
902 412 : "pageserver_evictions",
903 412 : "Number of layers evicted from the pageserver",
904 412 : &["tenant_id", "shard_id", "timeline_id"]
905 412 : )
906 412 : .expect("failed to define a metric")
907 412 : });
908 :
909 412 : static EVICTIONS_WITH_LOW_RESIDENCE_DURATION: Lazy<IntCounterVec> = Lazy::new(|| {
910 412 : register_int_counter_vec!(
911 412 : "pageserver_evictions_with_low_residence_duration",
912 412 : "If a layer is evicted that was resident for less than `low_threshold`, it is counted to this counter. \
913 412 : Residence duration is determined using the `residence_duration_data_source`.",
914 412 : &["tenant_id", "shard_id", "timeline_id", "residence_duration_data_source", "low_threshold_secs"]
915 412 : )
916 412 : .expect("failed to define a metric")
917 412 : });
918 :
919 0 : pub(crate) static UNEXPECTED_ONDEMAND_DOWNLOADS: Lazy<IntCounter> = Lazy::new(|| {
920 0 : register_int_counter!(
921 0 : "pageserver_unexpected_ondemand_downloads_count",
922 0 : "Number of unexpected on-demand downloads. \
923 0 : We log more context for each increment, so, forgo any labels in this metric.",
924 0 : )
925 0 : .expect("failed to define a metric")
926 0 : });
927 :
928 : /// How long did we take to start up? Broken down by labels to describe
929 : /// different phases of startup.
930 0 : pub static STARTUP_DURATION: Lazy<GaugeVec> = Lazy::new(|| {
931 0 : register_gauge_vec!(
932 0 : "pageserver_startup_duration_seconds",
933 0 : "Time taken by phases of pageserver startup, in seconds",
934 0 : &["phase"]
935 0 : )
936 0 : .expect("Failed to register pageserver_startup_duration_seconds metric")
937 0 : });
938 :
939 0 : pub static STARTUP_IS_LOADING: Lazy<UIntGauge> = Lazy::new(|| {
940 0 : register_uint_gauge!(
941 0 : "pageserver_startup_is_loading",
942 0 : "1 while in initial startup load of tenants, 0 at other times"
943 0 : )
944 0 : .expect("Failed to register pageserver_startup_is_loading")
945 0 : });
946 :
947 404 : pub(crate) static TIMELINE_EPHEMERAL_BYTES: Lazy<UIntGauge> = Lazy::new(|| {
948 404 : register_uint_gauge!(
949 404 : "pageserver_timeline_ephemeral_bytes",
950 404 : "Total number of bytes in ephemeral layers, summed for all timelines. Approximate, lazily updated."
951 404 : )
952 404 : .expect("Failed to register metric")
953 404 : });
954 :
955 : /// Metrics related to the lifecycle of a [`crate::tenant::Tenant`] object: things
956 : /// like how long it took to load.
957 : ///
958 : /// Note that these are process-global metrics, _not_ per-tenant metrics. Per-tenant
959 : /// metrics are rather expensive, and usually fine grained stuff makes more sense
960 : /// at a timeline level than tenant level.
961 : pub(crate) struct TenantMetrics {
962 : /// How long did tenants take to go from construction to active state?
963 : pub(crate) activation: Histogram,
964 : pub(crate) preload: Histogram,
965 : pub(crate) attach: Histogram,
966 :
967 : /// How many tenants are included in the initial startup of the pagesrever?
968 : pub(crate) startup_scheduled: IntCounter,
969 : pub(crate) startup_complete: IntCounter,
970 : }
971 :
972 0 : pub(crate) static TENANT: Lazy<TenantMetrics> = Lazy::new(|| {
973 0 : TenantMetrics {
974 0 : activation: register_histogram!(
975 0 : "pageserver_tenant_activation_seconds",
976 0 : "Time taken by tenants to activate, in seconds",
977 0 : CRITICAL_OP_BUCKETS.into()
978 0 : )
979 0 : .expect("Failed to register metric"),
980 0 : preload: register_histogram!(
981 0 : "pageserver_tenant_preload_seconds",
982 0 : "Time taken by tenants to load remote metadata on startup/attach, in seconds",
983 0 : CRITICAL_OP_BUCKETS.into()
984 0 : )
985 0 : .expect("Failed to register metric"),
986 0 : attach: register_histogram!(
987 0 : "pageserver_tenant_attach_seconds",
988 0 : "Time taken by tenants to intialize, after remote metadata is already loaded",
989 0 : CRITICAL_OP_BUCKETS.into()
990 0 : )
991 0 : .expect("Failed to register metric"),
992 0 : startup_scheduled: register_int_counter!(
993 0 : "pageserver_tenant_startup_scheduled",
994 0 : "Number of tenants included in pageserver startup (doesn't count tenants attached later)"
995 0 : ).expect("Failed to register metric"),
996 0 : startup_complete: register_int_counter!(
997 0 : "pageserver_tenant_startup_complete",
998 0 : "Number of tenants that have completed warm-up, or activated on-demand during initial startup: \
999 0 : should eventually reach `pageserver_tenant_startup_scheduled_total`. Does not include broken \
1000 0 : tenants: such cases will lead to this metric never reaching the scheduled count."
1001 0 : ).expect("Failed to register metric"),
1002 0 : }
1003 0 : });
1004 :
1005 : /// Each `Timeline`'s [`EVICTIONS_WITH_LOW_RESIDENCE_DURATION`] metric.
1006 : #[derive(Debug)]
1007 : pub(crate) struct EvictionsWithLowResidenceDuration {
1008 : data_source: &'static str,
1009 : threshold: Duration,
1010 : counter: Option<IntCounter>,
1011 : }
1012 :
1013 : pub(crate) struct EvictionsWithLowResidenceDurationBuilder {
1014 : data_source: &'static str,
1015 : threshold: Duration,
1016 : }
1017 :
1018 : impl EvictionsWithLowResidenceDurationBuilder {
1019 904 : pub fn new(data_source: &'static str, threshold: Duration) -> Self {
1020 904 : Self {
1021 904 : data_source,
1022 904 : threshold,
1023 904 : }
1024 904 : }
1025 :
1026 904 : fn build(
1027 904 : &self,
1028 904 : tenant_id: &str,
1029 904 : shard_id: &str,
1030 904 : timeline_id: &str,
1031 904 : ) -> EvictionsWithLowResidenceDuration {
1032 904 : let counter = EVICTIONS_WITH_LOW_RESIDENCE_DURATION
1033 904 : .get_metric_with_label_values(&[
1034 904 : tenant_id,
1035 904 : shard_id,
1036 904 : timeline_id,
1037 904 : self.data_source,
1038 904 : &EvictionsWithLowResidenceDuration::threshold_label_value(self.threshold),
1039 904 : ])
1040 904 : .unwrap();
1041 904 : EvictionsWithLowResidenceDuration {
1042 904 : data_source: self.data_source,
1043 904 : threshold: self.threshold,
1044 904 : counter: Some(counter),
1045 904 : }
1046 904 : }
1047 : }
1048 :
1049 : impl EvictionsWithLowResidenceDuration {
1050 924 : fn threshold_label_value(threshold: Duration) -> String {
1051 924 : format!("{}", threshold.as_secs())
1052 924 : }
1053 :
1054 8 : pub fn observe(&self, observed_value: Duration) {
1055 8 : if observed_value < self.threshold {
1056 8 : self.counter
1057 8 : .as_ref()
1058 8 : .expect("nobody calls this function after `remove_from_vec`")
1059 8 : .inc();
1060 8 : }
1061 8 : }
1062 :
1063 0 : pub fn change_threshold(
1064 0 : &mut self,
1065 0 : tenant_id: &str,
1066 0 : shard_id: &str,
1067 0 : timeline_id: &str,
1068 0 : new_threshold: Duration,
1069 0 : ) {
1070 0 : if new_threshold == self.threshold {
1071 0 : return;
1072 0 : }
1073 0 : let mut with_new = EvictionsWithLowResidenceDurationBuilder::new(
1074 0 : self.data_source,
1075 0 : new_threshold,
1076 0 : )
1077 0 : .build(tenant_id, shard_id, timeline_id);
1078 0 : std::mem::swap(self, &mut with_new);
1079 0 : with_new.remove(tenant_id, shard_id, timeline_id);
1080 0 : }
1081 :
1082 : // This could be a `Drop` impl, but, we need the `tenant_id` and `timeline_id`.
1083 20 : fn remove(&mut self, tenant_id: &str, shard_id: &str, timeline_id: &str) {
1084 20 : let Some(_counter) = self.counter.take() else {
1085 0 : return;
1086 : };
1087 :
1088 20 : let threshold = Self::threshold_label_value(self.threshold);
1089 20 :
1090 20 : let removed = EVICTIONS_WITH_LOW_RESIDENCE_DURATION.remove_label_values(&[
1091 20 : tenant_id,
1092 20 : shard_id,
1093 20 : timeline_id,
1094 20 : self.data_source,
1095 20 : &threshold,
1096 20 : ]);
1097 20 :
1098 20 : match removed {
1099 0 : Err(e) => {
1100 0 : // this has been hit in staging as
1101 0 : // <https://neondatabase.sentry.io/issues/4142396994/>, but we don't know how.
1102 0 : // because we can be in the drop path already, don't risk:
1103 0 : // - "double-panic => illegal instruction" or
1104 0 : // - future "drop panick => abort"
1105 0 : //
1106 0 : // so just nag: (the error has the labels)
1107 0 : tracing::warn!(
1108 0 : "failed to remove EvictionsWithLowResidenceDuration, it was already removed? {e:#?}"
1109 : );
1110 : }
1111 : Ok(()) => {
1112 : // to help identify cases where we double-remove the same values, let's log all
1113 : // deletions?
1114 20 : tracing::info!(
1115 0 : "removed EvictionsWithLowResidenceDuration with {tenant_id}, {timeline_id}, {}, {threshold}",
1116 : self.data_source
1117 : );
1118 : }
1119 : }
1120 20 : }
1121 : }
1122 :
1123 : // Metrics collected on disk IO operations
1124 : //
1125 : // Roughly logarithmic scale.
1126 : const STORAGE_IO_TIME_BUCKETS: &[f64] = &[
1127 : 0.000030, // 30 usec
1128 : 0.001000, // 1000 usec
1129 : 0.030, // 30 ms
1130 : 1.000, // 1000 ms
1131 : 30.000, // 30000 ms
1132 : ];
1133 :
1134 : /// VirtualFile fs operation variants.
1135 : ///
1136 : /// Operations:
1137 : /// - open ([`std::fs::OpenOptions::open`])
1138 : /// - close (dropping [`crate::virtual_file::VirtualFile`])
1139 : /// - close-by-replace (close by replacement algorithm)
1140 : /// - read (`read_at`)
1141 : /// - write (`write_at`)
1142 : /// - seek (modify internal position or file length query)
1143 : /// - fsync ([`std::fs::File::sync_all`])
1144 : /// - metadata ([`std::fs::File::metadata`])
1145 : #[derive(
1146 0 : Debug, Clone, Copy, strum_macros::EnumCount, strum_macros::EnumIter, strum_macros::FromRepr,
1147 : )]
1148 : pub(crate) enum StorageIoOperation {
1149 : Open,
1150 : OpenAfterReplace,
1151 : Close,
1152 : CloseByReplace,
1153 : Read,
1154 : Write,
1155 : Seek,
1156 : Fsync,
1157 : Metadata,
1158 : }
1159 :
1160 : impl StorageIoOperation {
1161 4248 : pub fn as_str(&self) -> &'static str {
1162 4248 : match self {
1163 472 : StorageIoOperation::Open => "open",
1164 472 : StorageIoOperation::OpenAfterReplace => "open-after-replace",
1165 472 : StorageIoOperation::Close => "close",
1166 472 : StorageIoOperation::CloseByReplace => "close-by-replace",
1167 472 : StorageIoOperation::Read => "read",
1168 472 : StorageIoOperation::Write => "write",
1169 472 : StorageIoOperation::Seek => "seek",
1170 472 : StorageIoOperation::Fsync => "fsync",
1171 472 : StorageIoOperation::Metadata => "metadata",
1172 : }
1173 4248 : }
1174 : }
1175 :
1176 : /// Tracks time taken by fs operations near VirtualFile.
1177 : #[derive(Debug)]
1178 : pub(crate) struct StorageIoTime {
1179 : metrics: [Histogram; StorageIoOperation::COUNT],
1180 : }
1181 :
1182 : impl StorageIoTime {
1183 472 : fn new() -> Self {
1184 472 : let storage_io_histogram_vec = register_histogram_vec!(
1185 472 : "pageserver_io_operations_seconds",
1186 472 : "Time spent in IO operations",
1187 472 : &["operation"],
1188 472 : STORAGE_IO_TIME_BUCKETS.into()
1189 472 : )
1190 472 : .expect("failed to define a metric");
1191 4248 : let metrics = std::array::from_fn(|i| {
1192 4248 : let op = StorageIoOperation::from_repr(i).unwrap();
1193 4248 : storage_io_histogram_vec
1194 4248 : .get_metric_with_label_values(&[op.as_str()])
1195 4248 : .unwrap()
1196 4248 : });
1197 472 : Self { metrics }
1198 472 : }
1199 :
1200 4025549 : pub(crate) fn get(&self, op: StorageIoOperation) -> &Histogram {
1201 4025549 : &self.metrics[op as usize]
1202 4025549 : }
1203 : }
1204 :
1205 : pub(crate) static STORAGE_IO_TIME_METRIC: Lazy<StorageIoTime> = Lazy::new(StorageIoTime::new);
1206 :
1207 : const STORAGE_IO_SIZE_OPERATIONS: &[&str] = &["read", "write"];
1208 :
1209 : // Needed for the https://neonprod.grafana.net/d/5uK9tHL4k/picking-tenant-for-relocation?orgId=1
1210 464 : pub(crate) static STORAGE_IO_SIZE: Lazy<IntGaugeVec> = Lazy::new(|| {
1211 464 : register_int_gauge_vec!(
1212 464 : "pageserver_io_operations_bytes_total",
1213 464 : "Total amount of bytes read/written in IO operations",
1214 464 : &["operation", "tenant_id", "shard_id", "timeline_id"]
1215 464 : )
1216 464 : .expect("failed to define a metric")
1217 464 : });
1218 :
1219 : #[cfg(not(test))]
1220 : pub(crate) mod virtual_file_descriptor_cache {
1221 : use super::*;
1222 :
1223 0 : pub(crate) static SIZE_MAX: Lazy<UIntGauge> = Lazy::new(|| {
1224 0 : register_uint_gauge!(
1225 0 : "pageserver_virtual_file_descriptor_cache_size_max",
1226 0 : "Maximum number of open file descriptors in the cache."
1227 0 : )
1228 0 : .unwrap()
1229 0 : });
1230 :
1231 : // SIZE_CURRENT: derive it like so:
1232 : // ```
1233 : // sum (pageserver_io_operations_seconds_count{operation=~"^(open|open-after-replace)$")
1234 : // -ignoring(operation)
1235 : // sum(pageserver_io_operations_seconds_count{operation=~"^(close|close-by-replace)$"}
1236 : // ```
1237 : }
1238 :
1239 : #[cfg(not(test))]
1240 : pub(crate) mod virtual_file_io_engine {
1241 : use super::*;
1242 :
1243 0 : pub(crate) static KIND: Lazy<UIntGaugeVec> = Lazy::new(|| {
1244 0 : register_uint_gauge_vec!(
1245 0 : "pageserver_virtual_file_io_engine_kind",
1246 0 : "The configured io engine for VirtualFile",
1247 0 : &["kind"],
1248 0 : )
1249 0 : .unwrap()
1250 0 : });
1251 : }
1252 :
1253 : pub(crate) struct SmgrOpTimer(Option<SmgrOpTimerInner>);
1254 : pub(crate) struct SmgrOpTimerInner {
1255 : global_execution_latency_histo: Histogram,
1256 : per_timeline_execution_latency_histo: Option<Histogram>,
1257 :
1258 : global_batch_wait_time: Histogram,
1259 : per_timeline_batch_wait_time: Histogram,
1260 :
1261 : global_flush_in_progress_micros: IntCounter,
1262 : per_timeline_flush_in_progress_micros: IntCounter,
1263 :
1264 : throttling: Arc<tenant_throttling::Pagestream>,
1265 :
1266 : timings: SmgrOpTimerState,
1267 : }
1268 :
1269 : /// The stages of request processing are represented by the enum variants.
1270 : /// Used as part of [`SmgrOpTimerInner::timings`].
1271 : ///
1272 : /// Request processing calls into the `SmgrOpTimer::observe_*` methods at the
1273 : /// transition points.
1274 : /// These methods bump relevant counters and then update [`SmgrOpTimerInner::timings`]
1275 : /// to the next state.
1276 : ///
1277 : /// Each request goes through every stage, in all configurations.
1278 : ///
1279 : #[derive(Debug)]
1280 : enum SmgrOpTimerState {
1281 : Received {
1282 : // In the future, we may want to track the full time the request spent
1283 : // inside pageserver process (time spent in kernel buffers can't be tracked).
1284 : // `received_at` would be used for that.
1285 : #[allow(dead_code)]
1286 : received_at: Instant,
1287 : },
1288 : Throttling {
1289 : throttle_started_at: Instant,
1290 : },
1291 : Batching {
1292 : throttle_done_at: Instant,
1293 : },
1294 : Executing {
1295 : execution_started_at: Instant,
1296 : },
1297 : Flushing,
1298 : // NB: when adding observation points, remember to update the Drop impl.
1299 : }
1300 :
1301 : // NB: when adding observation points, remember to update the Drop impl.
1302 : impl SmgrOpTimer {
1303 : /// See [`SmgrOpTimerState`] for more context.
1304 0 : pub(crate) fn observe_throttle_start(&mut self, at: Instant) {
1305 0 : let Some(inner) = self.0.as_mut() else {
1306 0 : return;
1307 : };
1308 0 : let SmgrOpTimerState::Received { received_at: _ } = &mut inner.timings else {
1309 0 : return;
1310 : };
1311 0 : inner.throttling.count_accounted_start.inc();
1312 0 : inner.timings = SmgrOpTimerState::Throttling {
1313 0 : throttle_started_at: at,
1314 0 : };
1315 0 : }
1316 :
1317 : /// See [`SmgrOpTimerState`] for more context.
1318 0 : pub(crate) fn observe_throttle_done(&mut self, throttle: ThrottleResult) {
1319 0 : let Some(inner) = self.0.as_mut() else {
1320 0 : return;
1321 : };
1322 : let SmgrOpTimerState::Throttling {
1323 0 : throttle_started_at,
1324 0 : } = &inner.timings
1325 : else {
1326 0 : return;
1327 : };
1328 0 : inner.throttling.count_accounted_finish.inc();
1329 0 : match throttle {
1330 0 : ThrottleResult::NotThrottled { end } => {
1331 0 : inner.timings = SmgrOpTimerState::Batching {
1332 0 : throttle_done_at: end,
1333 0 : };
1334 0 : }
1335 0 : ThrottleResult::Throttled { end } => {
1336 0 : // update metrics
1337 0 : inner.throttling.count_throttled.inc();
1338 0 : inner
1339 0 : .throttling
1340 0 : .wait_time
1341 0 : .inc_by((end - *throttle_started_at).as_micros().try_into().unwrap());
1342 0 : // state transition
1343 0 : inner.timings = SmgrOpTimerState::Batching {
1344 0 : throttle_done_at: end,
1345 0 : };
1346 0 : }
1347 : }
1348 0 : }
1349 :
1350 : /// See [`SmgrOpTimerState`] for more context.
1351 0 : pub(crate) fn observe_execution_start(&mut self, at: Instant) {
1352 0 : let Some(inner) = self.0.as_mut() else {
1353 0 : return;
1354 : };
1355 0 : let SmgrOpTimerState::Batching { throttle_done_at } = &inner.timings else {
1356 0 : return;
1357 : };
1358 : // update metrics
1359 0 : let batch = at - *throttle_done_at;
1360 0 : inner.global_batch_wait_time.observe(batch.as_secs_f64());
1361 0 : inner
1362 0 : .per_timeline_batch_wait_time
1363 0 : .observe(batch.as_secs_f64());
1364 0 : // state transition
1365 0 : inner.timings = SmgrOpTimerState::Executing {
1366 0 : execution_started_at: at,
1367 0 : }
1368 0 : }
1369 :
1370 : /// For all but the first caller, this is a no-op.
1371 : /// The first callers receives Some, subsequent ones None.
1372 : ///
1373 : /// See [`SmgrOpTimerState`] for more context.
1374 0 : pub(crate) fn observe_execution_end(&mut self, at: Instant) -> Option<SmgrOpFlushInProgress> {
1375 : // NB: unlike the other observe_* methods, this one take()s.
1376 : #[allow(clippy::question_mark)] // maintain similar code pattern.
1377 0 : let Some(mut inner) = self.0.take() else {
1378 0 : return None;
1379 : };
1380 : let SmgrOpTimerState::Executing {
1381 0 : execution_started_at,
1382 0 : } = &inner.timings
1383 : else {
1384 0 : return None;
1385 : };
1386 : // update metrics
1387 0 : let execution = at - *execution_started_at;
1388 0 : inner
1389 0 : .global_execution_latency_histo
1390 0 : .observe(execution.as_secs_f64());
1391 0 : if let Some(per_timeline_execution_latency_histo) =
1392 0 : &inner.per_timeline_execution_latency_histo
1393 0 : {
1394 0 : per_timeline_execution_latency_histo.observe(execution.as_secs_f64());
1395 0 : }
1396 :
1397 : // state transition
1398 0 : inner.timings = SmgrOpTimerState::Flushing;
1399 0 :
1400 0 : // return the flush in progress object which
1401 0 : // will do the remaining metrics updates
1402 0 : let SmgrOpTimerInner {
1403 0 : global_flush_in_progress_micros,
1404 0 : per_timeline_flush_in_progress_micros,
1405 0 : ..
1406 0 : } = inner;
1407 0 : Some(SmgrOpFlushInProgress {
1408 0 : global_micros: global_flush_in_progress_micros,
1409 0 : per_timeline_micros: per_timeline_flush_in_progress_micros,
1410 0 : })
1411 0 : }
1412 : }
1413 :
1414 : /// The last stage of request processing is serializing and flushing the request
1415 : /// into the TCP connection. We want to make slow flushes observable
1416 : /// _while they are occuring_, so this struct provides a wrapper method [`Self::measure`]
1417 : /// to periodically bump the metric.
1418 : ///
1419 : /// If in the future we decide that we're not interested in live updates, we can
1420 : /// add another `observe_*` method to [`SmgrOpTimer`], follow the existing pattern there,
1421 : /// and remove this struct from the code base.
1422 : pub(crate) struct SmgrOpFlushInProgress {
1423 : global_micros: IntCounter,
1424 : per_timeline_micros: IntCounter,
1425 : }
1426 :
1427 : impl Drop for SmgrOpTimer {
1428 0 : fn drop(&mut self) {
1429 0 : // In case of early drop, update any of the remaining metrics with
1430 0 : // observations so that (started,finished) counter pairs balance out
1431 0 : // and all counters on the latency path have the the same number of
1432 0 : // observations.
1433 0 : // It's technically lying and it would be better if each metric had
1434 0 : // a separate label or similar for cancelled requests.
1435 0 : // But we don't have that right now and counter pairs balancing
1436 0 : // out is useful when using the metrics in panels and whatnot.
1437 0 : let now = Instant::now();
1438 0 : self.observe_throttle_start(now);
1439 0 : self.observe_throttle_done(ThrottleResult::NotThrottled { end: now });
1440 0 : self.observe_execution_start(now);
1441 0 : let maybe_flush_timer = self.observe_execution_end(now);
1442 0 : drop(maybe_flush_timer);
1443 0 : }
1444 : }
1445 :
1446 : impl SmgrOpFlushInProgress {
1447 : /// The caller must guarantee that `socket_fd`` outlives this function.
1448 0 : pub(crate) async fn measure<Fut, O>(
1449 0 : self,
1450 0 : started_at: Instant,
1451 0 : mut fut: Fut,
1452 0 : socket_fd: RawFd,
1453 0 : ) -> O
1454 0 : where
1455 0 : Fut: std::future::Future<Output = O>,
1456 0 : {
1457 0 : let mut fut = std::pin::pin!(fut);
1458 0 :
1459 0 : let mut logged = false;
1460 0 : let mut last_counter_increment_at = started_at;
1461 0 : let mut observe_guard = scopeguard::guard(
1462 0 : |is_timeout| {
1463 0 : let now = Instant::now();
1464 0 :
1465 0 : // Increment counter
1466 0 : {
1467 0 : let elapsed_since_last_observe = now - last_counter_increment_at;
1468 0 : self.global_micros
1469 0 : .inc_by(u64::try_from(elapsed_since_last_observe.as_micros()).unwrap());
1470 0 : self.per_timeline_micros
1471 0 : .inc_by(u64::try_from(elapsed_since_last_observe.as_micros()).unwrap());
1472 0 : last_counter_increment_at = now;
1473 0 : }
1474 0 :
1475 0 : // Log something on every timeout, and on completion but only if we hit a timeout.
1476 0 : if is_timeout || logged {
1477 0 : logged = true;
1478 0 : let elapsed_total = now - started_at;
1479 0 : let msg = if is_timeout {
1480 0 : "slow flush ongoing"
1481 : } else {
1482 0 : "slow flush completed or cancelled"
1483 : };
1484 :
1485 0 : let (inq, outq) = {
1486 0 : // SAFETY: caller guarantees that `socket_fd` outlives this function.
1487 0 : #[cfg(target_os = "linux")]
1488 0 : unsafe {
1489 0 : (
1490 0 : utils::linux_socket_ioctl::inq(socket_fd).unwrap_or(-2),
1491 0 : utils::linux_socket_ioctl::outq(socket_fd).unwrap_or(-2),
1492 0 : )
1493 0 : }
1494 0 : #[cfg(not(target_os = "linux"))]
1495 0 : {
1496 0 : _ = socket_fd; // appease unused lint on macOS
1497 0 : (-1, -1)
1498 0 : }
1499 0 : };
1500 0 :
1501 0 : let elapsed_total_secs = format!("{:.6}", elapsed_total.as_secs_f64());
1502 0 : tracing::info!(elapsed_total_secs, inq, outq, msg);
1503 0 : }
1504 0 : },
1505 0 : |mut observe| {
1506 0 : observe(false);
1507 0 : },
1508 0 : );
1509 :
1510 : loop {
1511 0 : match tokio::time::timeout(Duration::from_secs(10), &mut fut).await {
1512 0 : Ok(v) => return v,
1513 0 : Err(_timeout) => {
1514 0 : (*observe_guard)(true);
1515 0 : }
1516 : }
1517 : }
1518 0 : }
1519 : }
1520 :
1521 : #[derive(
1522 : Debug,
1523 : Clone,
1524 : Copy,
1525 : IntoStaticStr,
1526 : strum_macros::EnumCount,
1527 0 : strum_macros::EnumIter,
1528 : strum_macros::FromRepr,
1529 : enum_map::Enum,
1530 : )]
1531 : #[strum(serialize_all = "snake_case")]
1532 : pub enum SmgrQueryType {
1533 : GetRelExists,
1534 : GetRelSize,
1535 : GetPageAtLsn,
1536 : GetDbSize,
1537 : GetSlruSegment,
1538 : #[cfg(feature = "testing")]
1539 : Test,
1540 : }
1541 :
1542 : pub(crate) struct SmgrQueryTimePerTimeline {
1543 : global_started: [IntCounter; SmgrQueryType::COUNT],
1544 : global_latency: [Histogram; SmgrQueryType::COUNT],
1545 : per_timeline_getpage_started: IntCounter,
1546 : per_timeline_getpage_latency: Histogram,
1547 : global_batch_size: Histogram,
1548 : per_timeline_batch_size: Histogram,
1549 : global_flush_in_progress_micros: IntCounter,
1550 : per_timeline_flush_in_progress_micros: IntCounter,
1551 : global_batch_wait_time: Histogram,
1552 : per_timeline_batch_wait_time: Histogram,
1553 : throttling: Arc<tenant_throttling::Pagestream>,
1554 : }
1555 :
1556 412 : static SMGR_QUERY_STARTED_GLOBAL: Lazy<IntCounterVec> = Lazy::new(|| {
1557 412 : register_int_counter_vec!(
1558 412 : // it's a counter, but, name is prepared to extend it to a histogram of queue depth
1559 412 : "pageserver_smgr_query_started_global_count",
1560 412 : "Number of smgr queries started, aggregated by query type.",
1561 412 : &["smgr_query_type"],
1562 412 : )
1563 412 : .expect("failed to define a metric")
1564 412 : });
1565 :
1566 412 : static SMGR_QUERY_STARTED_PER_TENANT_TIMELINE: Lazy<IntCounterVec> = Lazy::new(|| {
1567 412 : register_int_counter_vec!(
1568 412 : // it's a counter, but, name is prepared to extend it to a histogram of queue depth
1569 412 : "pageserver_smgr_query_started_count",
1570 412 : "Number of smgr queries started, aggregated by query type and tenant/timeline.",
1571 412 : &["smgr_query_type", "tenant_id", "shard_id", "timeline_id"],
1572 412 : )
1573 412 : .expect("failed to define a metric")
1574 412 : });
1575 :
1576 : // Alias so all histograms recording per-timeline smgr timings use the same buckets.
1577 : static SMGR_QUERY_TIME_PER_TENANT_TIMELINE_BUCKETS: &[f64] = CRITICAL_OP_BUCKETS;
1578 :
1579 412 : static SMGR_QUERY_TIME_PER_TENANT_TIMELINE: Lazy<HistogramVec> = Lazy::new(|| {
1580 412 : register_histogram_vec!(
1581 412 : "pageserver_smgr_query_seconds",
1582 412 : "Time spent _executing_ smgr query handling, excluding batch and throttle delays.",
1583 412 : &["smgr_query_type", "tenant_id", "shard_id", "timeline_id"],
1584 412 : SMGR_QUERY_TIME_PER_TENANT_TIMELINE_BUCKETS.into(),
1585 412 : )
1586 412 : .expect("failed to define a metric")
1587 412 : });
1588 :
1589 412 : static SMGR_QUERY_TIME_GLOBAL_BUCKETS: Lazy<Vec<f64>> = Lazy::new(|| {
1590 412 : [
1591 412 : 1,
1592 412 : 10,
1593 412 : 20,
1594 412 : 40,
1595 412 : 60,
1596 412 : 80,
1597 412 : 100,
1598 412 : 200,
1599 412 : 300,
1600 412 : 400,
1601 412 : 500,
1602 412 : 600,
1603 412 : 700,
1604 412 : 800,
1605 412 : 900,
1606 412 : 1_000, // 1ms
1607 412 : 2_000,
1608 412 : 4_000,
1609 412 : 6_000,
1610 412 : 8_000,
1611 412 : 10_000, // 10ms
1612 412 : 20_000,
1613 412 : 40_000,
1614 412 : 60_000,
1615 412 : 80_000,
1616 412 : 100_000,
1617 412 : 200_000,
1618 412 : 400_000,
1619 412 : 600_000,
1620 412 : 800_000,
1621 412 : 1_000_000, // 1s
1622 412 : 2_000_000,
1623 412 : 4_000_000,
1624 412 : 6_000_000,
1625 412 : 8_000_000,
1626 412 : 10_000_000, // 10s
1627 412 : 20_000_000,
1628 412 : 50_000_000,
1629 412 : 100_000_000,
1630 412 : 200_000_000,
1631 412 : 1_000_000_000, // 1000s
1632 412 : ]
1633 412 : .into_iter()
1634 412 : .map(Duration::from_micros)
1635 16892 : .map(|d| d.as_secs_f64())
1636 412 : .collect()
1637 412 : });
1638 :
1639 412 : static SMGR_QUERY_TIME_GLOBAL: Lazy<HistogramVec> = Lazy::new(|| {
1640 412 : register_histogram_vec!(
1641 412 : "pageserver_smgr_query_seconds_global",
1642 412 : "Like pageserver_smgr_query_seconds, but aggregated to instance level.",
1643 412 : &["smgr_query_type"],
1644 412 : SMGR_QUERY_TIME_GLOBAL_BUCKETS.clone(),
1645 412 : )
1646 412 : .expect("failed to define a metric")
1647 412 : });
1648 :
1649 412 : static PAGE_SERVICE_BATCH_SIZE_BUCKETS_GLOBAL: Lazy<Vec<f64>> = Lazy::new(|| {
1650 412 : (1..=u32::try_from(Timeline::MAX_GET_VECTORED_KEYS).unwrap())
1651 13184 : .map(|v| v.into())
1652 412 : .collect()
1653 412 : });
1654 :
1655 412 : static PAGE_SERVICE_BATCH_SIZE_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
1656 412 : register_histogram!(
1657 412 : "pageserver_page_service_batch_size_global",
1658 412 : "Batch size of pageserver page service requests",
1659 412 : PAGE_SERVICE_BATCH_SIZE_BUCKETS_GLOBAL.clone(),
1660 412 : )
1661 412 : .expect("failed to define a metric")
1662 412 : });
1663 :
1664 412 : static PAGE_SERVICE_BATCH_SIZE_BUCKETS_PER_TIMELINE: Lazy<Vec<f64>> = Lazy::new(|| {
1665 412 : let mut buckets = Vec::new();
1666 2884 : for i in 0.. {
1667 2884 : let bucket = 1 << i;
1668 2884 : if bucket > u32::try_from(Timeline::MAX_GET_VECTORED_KEYS).unwrap() {
1669 412 : break;
1670 2472 : }
1671 2472 : buckets.push(bucket.into());
1672 : }
1673 412 : buckets
1674 412 : });
1675 :
1676 412 : static PAGE_SERVICE_BATCH_SIZE_PER_TENANT_TIMELINE: Lazy<HistogramVec> = Lazy::new(|| {
1677 412 : register_histogram_vec!(
1678 412 : "pageserver_page_service_batch_size",
1679 412 : "Batch size of pageserver page service requests",
1680 412 : &["tenant_id", "shard_id", "timeline_id"],
1681 412 : PAGE_SERVICE_BATCH_SIZE_BUCKETS_PER_TIMELINE.clone()
1682 412 : )
1683 412 : .expect("failed to define a metric")
1684 412 : });
1685 :
1686 0 : pub(crate) static PAGE_SERVICE_CONFIG_MAX_BATCH_SIZE: Lazy<IntGaugeVec> = Lazy::new(|| {
1687 0 : register_int_gauge_vec!(
1688 0 : "pageserver_page_service_config_max_batch_size",
1689 0 : "Configured maximum batch size for the server-side batching functionality of page_service. \
1690 0 : Labels expose more of the configuration parameters.",
1691 0 : &["mode", "execution"]
1692 0 : )
1693 0 : .expect("failed to define a metric")
1694 0 : });
1695 :
1696 0 : fn set_page_service_config_max_batch_size(conf: &PageServicePipeliningConfig) {
1697 0 : PAGE_SERVICE_CONFIG_MAX_BATCH_SIZE.reset();
1698 0 : let (label_values, value) = match conf {
1699 0 : PageServicePipeliningConfig::Serial => (["serial", "-"], 1),
1700 : PageServicePipeliningConfig::Pipelined(PageServicePipeliningConfigPipelined {
1701 0 : max_batch_size,
1702 0 : execution,
1703 0 : }) => {
1704 0 : let mode = "pipelined";
1705 0 : let execution = match execution {
1706 : PageServiceProtocolPipelinedExecutionStrategy::ConcurrentFutures => {
1707 0 : "concurrent-futures"
1708 : }
1709 0 : PageServiceProtocolPipelinedExecutionStrategy::Tasks => "tasks",
1710 : };
1711 0 : ([mode, execution], max_batch_size.get())
1712 : }
1713 : };
1714 0 : PAGE_SERVICE_CONFIG_MAX_BATCH_SIZE
1715 0 : .with_label_values(&label_values)
1716 0 : .set(value.try_into().unwrap());
1717 0 : }
1718 :
1719 412 : static PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS: Lazy<IntCounterVec> = Lazy::new(|| {
1720 412 : register_int_counter_vec!(
1721 412 : "pageserver_page_service_pagestream_flush_in_progress_micros",
1722 412 : "Counter that sums up the microseconds that a pagestream response was being flushed into the TCP connection. \
1723 412 : If the flush is particularly slow, this counter will be updated periodically to make slow flushes \
1724 412 : easily discoverable in monitoring. \
1725 412 : Hence, this is NOT a completion latency historgram.",
1726 412 : &["tenant_id", "shard_id", "timeline_id"],
1727 412 : )
1728 412 : .expect("failed to define a metric")
1729 412 : });
1730 :
1731 412 : static PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS_GLOBAL: Lazy<IntCounter> = Lazy::new(|| {
1732 412 : register_int_counter!(
1733 412 : "pageserver_page_service_pagestream_flush_in_progress_micros_global",
1734 412 : "Like pageserver_page_service_pagestream_flush_in_progress_seconds, but instance-wide.",
1735 412 : )
1736 412 : .expect("failed to define a metric")
1737 412 : });
1738 :
1739 412 : static PAGE_SERVICE_SMGR_BATCH_WAIT_TIME: Lazy<HistogramVec> = Lazy::new(|| {
1740 412 : register_histogram_vec!(
1741 412 : "pageserver_page_service_pagestream_batch_wait_time_seconds",
1742 412 : "Time a request spent waiting in its batch until the batch moved to throttle&execution.",
1743 412 : &["tenant_id", "shard_id", "timeline_id"],
1744 412 : SMGR_QUERY_TIME_PER_TENANT_TIMELINE_BUCKETS.into(),
1745 412 : )
1746 412 : .expect("failed to define a metric")
1747 412 : });
1748 :
1749 412 : static PAGE_SERVICE_SMGR_BATCH_WAIT_TIME_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
1750 412 : register_histogram!(
1751 412 : "pageserver_page_service_pagestream_batch_wait_time_seconds_global",
1752 412 : "Like pageserver_page_service_pagestream_batch_wait_time_seconds, but aggregated to instance level.",
1753 412 : SMGR_QUERY_TIME_GLOBAL_BUCKETS.to_vec(),
1754 412 : )
1755 412 : .expect("failed to define a metric")
1756 412 : });
1757 :
1758 : impl SmgrQueryTimePerTimeline {
1759 904 : pub(crate) fn new(
1760 904 : tenant_shard_id: &TenantShardId,
1761 904 : timeline_id: &TimelineId,
1762 904 : pagestream_throttle_metrics: Arc<tenant_throttling::Pagestream>,
1763 904 : ) -> Self {
1764 904 : let tenant_id = tenant_shard_id.tenant_id.to_string();
1765 904 : let shard_slug = format!("{}", tenant_shard_id.shard_slug());
1766 904 : let timeline_id = timeline_id.to_string();
1767 5424 : let global_started = std::array::from_fn(|i| {
1768 5424 : let op = SmgrQueryType::from_repr(i).unwrap();
1769 5424 : SMGR_QUERY_STARTED_GLOBAL
1770 5424 : .get_metric_with_label_values(&[op.into()])
1771 5424 : .unwrap()
1772 5424 : });
1773 5424 : let global_latency = std::array::from_fn(|i| {
1774 5424 : let op = SmgrQueryType::from_repr(i).unwrap();
1775 5424 : SMGR_QUERY_TIME_GLOBAL
1776 5424 : .get_metric_with_label_values(&[op.into()])
1777 5424 : .unwrap()
1778 5424 : });
1779 904 :
1780 904 : let per_timeline_getpage_started = SMGR_QUERY_STARTED_PER_TENANT_TIMELINE
1781 904 : .get_metric_with_label_values(&[
1782 904 : SmgrQueryType::GetPageAtLsn.into(),
1783 904 : &tenant_id,
1784 904 : &shard_slug,
1785 904 : &timeline_id,
1786 904 : ])
1787 904 : .unwrap();
1788 904 : let per_timeline_getpage_latency = SMGR_QUERY_TIME_PER_TENANT_TIMELINE
1789 904 : .get_metric_with_label_values(&[
1790 904 : SmgrQueryType::GetPageAtLsn.into(),
1791 904 : &tenant_id,
1792 904 : &shard_slug,
1793 904 : &timeline_id,
1794 904 : ])
1795 904 : .unwrap();
1796 904 :
1797 904 : let global_batch_size = PAGE_SERVICE_BATCH_SIZE_GLOBAL.clone();
1798 904 : let per_timeline_batch_size = PAGE_SERVICE_BATCH_SIZE_PER_TENANT_TIMELINE
1799 904 : .get_metric_with_label_values(&[&tenant_id, &shard_slug, &timeline_id])
1800 904 : .unwrap();
1801 904 :
1802 904 : let global_batch_wait_time = PAGE_SERVICE_SMGR_BATCH_WAIT_TIME_GLOBAL.clone();
1803 904 : let per_timeline_batch_wait_time = PAGE_SERVICE_SMGR_BATCH_WAIT_TIME
1804 904 : .get_metric_with_label_values(&[&tenant_id, &shard_slug, &timeline_id])
1805 904 : .unwrap();
1806 904 :
1807 904 : let global_flush_in_progress_micros =
1808 904 : PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS_GLOBAL.clone();
1809 904 : let per_timeline_flush_in_progress_micros = PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS
1810 904 : .get_metric_with_label_values(&[&tenant_id, &shard_slug, &timeline_id])
1811 904 : .unwrap();
1812 904 :
1813 904 : Self {
1814 904 : global_started,
1815 904 : global_latency,
1816 904 : per_timeline_getpage_latency,
1817 904 : per_timeline_getpage_started,
1818 904 : global_batch_size,
1819 904 : per_timeline_batch_size,
1820 904 : global_flush_in_progress_micros,
1821 904 : per_timeline_flush_in_progress_micros,
1822 904 : global_batch_wait_time,
1823 904 : per_timeline_batch_wait_time,
1824 904 : throttling: pagestream_throttle_metrics,
1825 904 : }
1826 904 : }
1827 0 : pub(crate) fn start_smgr_op(&self, op: SmgrQueryType, received_at: Instant) -> SmgrOpTimer {
1828 0 : self.global_started[op as usize].inc();
1829 :
1830 0 : let per_timeline_latency_histo = if matches!(op, SmgrQueryType::GetPageAtLsn) {
1831 0 : self.per_timeline_getpage_started.inc();
1832 0 : Some(self.per_timeline_getpage_latency.clone())
1833 : } else {
1834 0 : None
1835 : };
1836 :
1837 0 : SmgrOpTimer(Some(SmgrOpTimerInner {
1838 0 : global_execution_latency_histo: self.global_latency[op as usize].clone(),
1839 0 : per_timeline_execution_latency_histo: per_timeline_latency_histo,
1840 0 : global_flush_in_progress_micros: self.global_flush_in_progress_micros.clone(),
1841 0 : per_timeline_flush_in_progress_micros: self
1842 0 : .per_timeline_flush_in_progress_micros
1843 0 : .clone(),
1844 0 : global_batch_wait_time: self.global_batch_wait_time.clone(),
1845 0 : per_timeline_batch_wait_time: self.per_timeline_batch_wait_time.clone(),
1846 0 : throttling: self.throttling.clone(),
1847 0 : timings: SmgrOpTimerState::Received { received_at },
1848 0 : }))
1849 0 : }
1850 :
1851 : /// TODO: do something about this? seems odd, we have a similar call on SmgrOpTimer
1852 0 : pub(crate) fn observe_getpage_batch_start(&self, batch_size: usize) {
1853 0 : self.global_batch_size.observe(batch_size as f64);
1854 0 : self.per_timeline_batch_size.observe(batch_size as f64);
1855 0 : }
1856 : }
1857 :
1858 : // keep in sync with control plane Go code so that we can validate
1859 : // compute's basebackup_ms metric with our perspective in the context of SLI/SLO.
1860 0 : static COMPUTE_STARTUP_BUCKETS: Lazy<[f64; 28]> = Lazy::new(|| {
1861 0 : // Go code uses milliseconds. Variable is called `computeStartupBuckets`
1862 0 : [
1863 0 : 5, 10, 20, 30, 50, 70, 100, 120, 150, 200, 250, 300, 350, 400, 450, 500, 600, 800, 1000,
1864 0 : 1500, 2000, 2500, 3000, 5000, 10000, 20000, 40000, 60000,
1865 0 : ]
1866 0 : .map(|ms| (ms as f64) / 1000.0)
1867 0 : });
1868 :
1869 : pub(crate) struct BasebackupQueryTime {
1870 : ok: Histogram,
1871 : error: Histogram,
1872 : client_error: Histogram,
1873 : }
1874 :
1875 0 : pub(crate) static BASEBACKUP_QUERY_TIME: Lazy<BasebackupQueryTime> = Lazy::new(|| {
1876 0 : let vec = register_histogram_vec!(
1877 0 : "pageserver_basebackup_query_seconds",
1878 0 : "Histogram of basebackup queries durations, by result type",
1879 0 : &["result"],
1880 0 : COMPUTE_STARTUP_BUCKETS.to_vec(),
1881 0 : )
1882 0 : .expect("failed to define a metric");
1883 0 : BasebackupQueryTime {
1884 0 : ok: vec.get_metric_with_label_values(&["ok"]).unwrap(),
1885 0 : error: vec.get_metric_with_label_values(&["error"]).unwrap(),
1886 0 : client_error: vec.get_metric_with_label_values(&["client_error"]).unwrap(),
1887 0 : }
1888 0 : });
1889 :
1890 : pub(crate) struct BasebackupQueryTimeOngoingRecording<'a> {
1891 : parent: &'a BasebackupQueryTime,
1892 : start: std::time::Instant,
1893 : }
1894 :
1895 : impl BasebackupQueryTime {
1896 0 : pub(crate) fn start_recording(&self) -> BasebackupQueryTimeOngoingRecording<'_> {
1897 0 : let start = Instant::now();
1898 0 : BasebackupQueryTimeOngoingRecording {
1899 0 : parent: self,
1900 0 : start,
1901 0 : }
1902 0 : }
1903 : }
1904 :
1905 : impl BasebackupQueryTimeOngoingRecording<'_> {
1906 0 : pub(crate) fn observe<T>(self, res: &Result<T, QueryError>) {
1907 0 : let elapsed = self.start.elapsed().as_secs_f64();
1908 : // If you want to change categorize of a specific error, also change it in `log_query_error`.
1909 0 : let metric = match res {
1910 0 : Ok(_) => &self.parent.ok,
1911 0 : Err(QueryError::Disconnected(ConnectionError::Io(io_error)))
1912 0 : if is_expected_io_error(io_error) =>
1913 0 : {
1914 0 : &self.parent.client_error
1915 : }
1916 0 : Err(_) => &self.parent.error,
1917 : };
1918 0 : metric.observe(elapsed);
1919 0 : }
1920 : }
1921 :
1922 0 : pub(crate) static LIVE_CONNECTIONS: Lazy<IntCounterPairVec> = Lazy::new(|| {
1923 0 : register_int_counter_pair_vec!(
1924 0 : "pageserver_live_connections_started",
1925 0 : "Number of network connections that we started handling",
1926 0 : "pageserver_live_connections_finished",
1927 0 : "Number of network connections that we finished handling",
1928 0 : &["pageserver_connection_kind"]
1929 0 : )
1930 0 : .expect("failed to define a metric")
1931 0 : });
1932 :
1933 : #[derive(Clone, Copy, enum_map::Enum, IntoStaticStr)]
1934 : pub(crate) enum ComputeCommandKind {
1935 : PageStreamV3,
1936 : PageStreamV2,
1937 : Basebackup,
1938 : Fullbackup,
1939 : LeaseLsn,
1940 : }
1941 :
1942 : pub(crate) struct ComputeCommandCounters {
1943 : map: EnumMap<ComputeCommandKind, IntCounter>,
1944 : }
1945 :
1946 0 : pub(crate) static COMPUTE_COMMANDS_COUNTERS: Lazy<ComputeCommandCounters> = Lazy::new(|| {
1947 0 : let inner = register_int_counter_vec!(
1948 0 : "pageserver_compute_commands",
1949 0 : "Number of compute -> pageserver commands processed",
1950 0 : &["command"]
1951 0 : )
1952 0 : .expect("failed to define a metric");
1953 0 :
1954 0 : ComputeCommandCounters {
1955 0 : map: EnumMap::from_array(std::array::from_fn(|i| {
1956 0 : let command = ComputeCommandKind::from_usize(i);
1957 0 : let command_str: &'static str = command.into();
1958 0 : inner.with_label_values(&[command_str])
1959 0 : })),
1960 0 : }
1961 0 : });
1962 :
1963 : impl ComputeCommandCounters {
1964 0 : pub(crate) fn for_command(&self, command: ComputeCommandKind) -> &IntCounter {
1965 0 : &self.map[command]
1966 0 : }
1967 : }
1968 :
1969 : // remote storage metrics
1970 :
1971 404 : static REMOTE_TIMELINE_CLIENT_CALLS: Lazy<IntCounterPairVec> = Lazy::new(|| {
1972 404 : register_int_counter_pair_vec!(
1973 404 : "pageserver_remote_timeline_client_calls_started",
1974 404 : "Number of started calls to remote timeline client.",
1975 404 : "pageserver_remote_timeline_client_calls_finished",
1976 404 : "Number of finshed calls to remote timeline client.",
1977 404 : &[
1978 404 : "tenant_id",
1979 404 : "shard_id",
1980 404 : "timeline_id",
1981 404 : "file_kind",
1982 404 : "op_kind"
1983 404 : ],
1984 404 : )
1985 404 : .unwrap()
1986 404 : });
1987 :
1988 : static REMOTE_TIMELINE_CLIENT_BYTES_STARTED_COUNTER: Lazy<IntCounterVec> =
1989 400 : Lazy::new(|| {
1990 400 : register_int_counter_vec!(
1991 400 : "pageserver_remote_timeline_client_bytes_started",
1992 400 : "Incremented by the number of bytes associated with a remote timeline client operation. \
1993 400 : The increment happens when the operation is scheduled.",
1994 400 : &["tenant_id", "shard_id", "timeline_id", "file_kind", "op_kind"],
1995 400 : )
1996 400 : .expect("failed to define a metric")
1997 400 : });
1998 :
1999 400 : static REMOTE_TIMELINE_CLIENT_BYTES_FINISHED_COUNTER: Lazy<IntCounterVec> = Lazy::new(|| {
2000 400 : register_int_counter_vec!(
2001 400 : "pageserver_remote_timeline_client_bytes_finished",
2002 400 : "Incremented by the number of bytes associated with a remote timeline client operation. \
2003 400 : The increment happens when the operation finishes (regardless of success/failure/shutdown).",
2004 400 : &["tenant_id", "shard_id", "timeline_id", "file_kind", "op_kind"],
2005 400 : )
2006 400 : .expect("failed to define a metric")
2007 400 : });
2008 :
2009 : pub(crate) struct TenantManagerMetrics {
2010 : tenant_slots_attached: UIntGauge,
2011 : tenant_slots_secondary: UIntGauge,
2012 : tenant_slots_inprogress: UIntGauge,
2013 : pub(crate) tenant_slot_writes: IntCounter,
2014 : pub(crate) unexpected_errors: IntCounter,
2015 : }
2016 :
2017 : impl TenantManagerMetrics {
2018 : /// Helpers for tracking slots. Note that these do not track the lifetime of TenantSlot objects
2019 : /// exactly: they track the lifetime of the slots _in the tenant map_.
2020 4 : pub(crate) fn slot_inserted(&self, slot: &TenantSlot) {
2021 4 : match slot {
2022 0 : TenantSlot::Attached(_) => {
2023 0 : self.tenant_slots_attached.inc();
2024 0 : }
2025 0 : TenantSlot::Secondary(_) => {
2026 0 : self.tenant_slots_secondary.inc();
2027 0 : }
2028 4 : TenantSlot::InProgress(_) => {
2029 4 : self.tenant_slots_inprogress.inc();
2030 4 : }
2031 : }
2032 4 : }
2033 :
2034 4 : pub(crate) fn slot_removed(&self, slot: &TenantSlot) {
2035 4 : match slot {
2036 4 : TenantSlot::Attached(_) => {
2037 4 : self.tenant_slots_attached.dec();
2038 4 : }
2039 0 : TenantSlot::Secondary(_) => {
2040 0 : self.tenant_slots_secondary.dec();
2041 0 : }
2042 0 : TenantSlot::InProgress(_) => {
2043 0 : self.tenant_slots_inprogress.dec();
2044 0 : }
2045 : }
2046 4 : }
2047 :
2048 : #[cfg(all(debug_assertions, not(test)))]
2049 0 : pub(crate) fn slots_total(&self) -> u64 {
2050 0 : self.tenant_slots_attached.get()
2051 0 : + self.tenant_slots_secondary.get()
2052 0 : + self.tenant_slots_inprogress.get()
2053 0 : }
2054 : }
2055 :
2056 4 : pub(crate) static TENANT_MANAGER: Lazy<TenantManagerMetrics> = Lazy::new(|| {
2057 4 : let tenant_slots = register_uint_gauge_vec!(
2058 4 : "pageserver_tenant_manager_slots",
2059 4 : "How many slots currently exist, including all attached, secondary and in-progress operations",
2060 4 : &["mode"]
2061 4 : )
2062 4 : .expect("failed to define a metric");
2063 4 : TenantManagerMetrics {
2064 4 : tenant_slots_attached: tenant_slots
2065 4 : .get_metric_with_label_values(&["attached"])
2066 4 : .unwrap(),
2067 4 : tenant_slots_secondary: tenant_slots
2068 4 : .get_metric_with_label_values(&["secondary"])
2069 4 : .unwrap(),
2070 4 : tenant_slots_inprogress: tenant_slots
2071 4 : .get_metric_with_label_values(&["inprogress"])
2072 4 : .unwrap(),
2073 4 : tenant_slot_writes: register_int_counter!(
2074 4 : "pageserver_tenant_manager_slot_writes",
2075 4 : "Writes to a tenant slot, including all of create/attach/detach/delete"
2076 4 : )
2077 4 : .expect("failed to define a metric"),
2078 4 : unexpected_errors: register_int_counter!(
2079 4 : "pageserver_tenant_manager_unexpected_errors_total",
2080 4 : "Number of unexpected conditions encountered: nonzero value indicates a non-fatal bug."
2081 4 : )
2082 4 : .expect("failed to define a metric"),
2083 4 : }
2084 4 : });
2085 :
2086 : pub(crate) struct DeletionQueueMetrics {
2087 : pub(crate) keys_submitted: IntCounter,
2088 : pub(crate) keys_dropped: IntCounter,
2089 : pub(crate) keys_executed: IntCounter,
2090 : pub(crate) keys_validated: IntCounter,
2091 : pub(crate) dropped_lsn_updates: IntCounter,
2092 : pub(crate) unexpected_errors: IntCounter,
2093 : pub(crate) remote_errors: IntCounterVec,
2094 : }
2095 70 : pub(crate) static DELETION_QUEUE: Lazy<DeletionQueueMetrics> = Lazy::new(|| {
2096 70 : DeletionQueueMetrics{
2097 70 :
2098 70 : keys_submitted: register_int_counter!(
2099 70 : "pageserver_deletion_queue_submitted_total",
2100 70 : "Number of objects submitted for deletion"
2101 70 : )
2102 70 : .expect("failed to define a metric"),
2103 70 :
2104 70 : keys_dropped: register_int_counter!(
2105 70 : "pageserver_deletion_queue_dropped_total",
2106 70 : "Number of object deletions dropped due to stale generation."
2107 70 : )
2108 70 : .expect("failed to define a metric"),
2109 70 :
2110 70 : keys_executed: register_int_counter!(
2111 70 : "pageserver_deletion_queue_executed_total",
2112 70 : "Number of objects deleted. Only includes objects that we actually deleted, sum with pageserver_deletion_queue_dropped_total for the total number of keys processed to completion"
2113 70 : )
2114 70 : .expect("failed to define a metric"),
2115 70 :
2116 70 : keys_validated: register_int_counter!(
2117 70 : "pageserver_deletion_queue_validated_total",
2118 70 : "Number of keys validated for deletion. Sum with pageserver_deletion_queue_dropped_total for the total number of keys that have passed through the validation stage."
2119 70 : )
2120 70 : .expect("failed to define a metric"),
2121 70 :
2122 70 : dropped_lsn_updates: register_int_counter!(
2123 70 : "pageserver_deletion_queue_dropped_lsn_updates_total",
2124 70 : "Updates to remote_consistent_lsn dropped due to stale generation number."
2125 70 : )
2126 70 : .expect("failed to define a metric"),
2127 70 : unexpected_errors: register_int_counter!(
2128 70 : "pageserver_deletion_queue_unexpected_errors_total",
2129 70 : "Number of unexpected condiions that may stall the queue: any value above zero is unexpected."
2130 70 : )
2131 70 : .expect("failed to define a metric"),
2132 70 : remote_errors: register_int_counter_vec!(
2133 70 : "pageserver_deletion_queue_remote_errors_total",
2134 70 : "Retryable remote I/O errors while executing deletions, for example 503 responses to DeleteObjects",
2135 70 : &["op_kind"],
2136 70 : )
2137 70 : .expect("failed to define a metric")
2138 70 : }
2139 70 : });
2140 :
2141 : pub(crate) struct SecondaryModeMetrics {
2142 : pub(crate) upload_heatmap: IntCounter,
2143 : pub(crate) upload_heatmap_errors: IntCounter,
2144 : pub(crate) upload_heatmap_duration: Histogram,
2145 : pub(crate) download_heatmap: IntCounter,
2146 : pub(crate) download_layer: IntCounter,
2147 : }
2148 0 : pub(crate) static SECONDARY_MODE: Lazy<SecondaryModeMetrics> = Lazy::new(|| {
2149 0 : SecondaryModeMetrics {
2150 0 : upload_heatmap: register_int_counter!(
2151 0 : "pageserver_secondary_upload_heatmap",
2152 0 : "Number of heatmaps written to remote storage by attached tenants"
2153 0 : )
2154 0 : .expect("failed to define a metric"),
2155 0 : upload_heatmap_errors: register_int_counter!(
2156 0 : "pageserver_secondary_upload_heatmap_errors",
2157 0 : "Failures writing heatmap to remote storage"
2158 0 : )
2159 0 : .expect("failed to define a metric"),
2160 0 : upload_heatmap_duration: register_histogram!(
2161 0 : "pageserver_secondary_upload_heatmap_duration",
2162 0 : "Time to build and upload a heatmap, including any waiting inside the remote storage client"
2163 0 : )
2164 0 : .expect("failed to define a metric"),
2165 0 : download_heatmap: register_int_counter!(
2166 0 : "pageserver_secondary_download_heatmap",
2167 0 : "Number of downloads of heatmaps by secondary mode locations, including when it hasn't changed"
2168 0 : )
2169 0 : .expect("failed to define a metric"),
2170 0 : download_layer: register_int_counter!(
2171 0 : "pageserver_secondary_download_layer",
2172 0 : "Number of downloads of layers by secondary mode locations"
2173 0 : )
2174 0 : .expect("failed to define a metric"),
2175 0 : }
2176 0 : });
2177 :
2178 0 : pub(crate) static SECONDARY_RESIDENT_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
2179 0 : register_uint_gauge_vec!(
2180 0 : "pageserver_secondary_resident_physical_size",
2181 0 : "The size of the layer files present in the pageserver's filesystem, for secondary locations.",
2182 0 : &["tenant_id", "shard_id"]
2183 0 : )
2184 0 : .expect("failed to define a metric")
2185 0 : });
2186 :
2187 0 : pub(crate) static NODE_UTILIZATION_SCORE: Lazy<UIntGauge> = Lazy::new(|| {
2188 0 : register_uint_gauge!(
2189 0 : "pageserver_utilization_score",
2190 0 : "The utilization score we report to the storage controller for scheduling, where 0 is empty, 1000000 is full, and anything above is considered overloaded",
2191 0 : )
2192 0 : .expect("failed to define a metric")
2193 0 : });
2194 :
2195 0 : pub(crate) static SECONDARY_HEATMAP_TOTAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
2196 0 : register_uint_gauge_vec!(
2197 0 : "pageserver_secondary_heatmap_total_size",
2198 0 : "The total size in bytes of all layers in the most recently downloaded heatmap.",
2199 0 : &["tenant_id", "shard_id"]
2200 0 : )
2201 0 : .expect("failed to define a metric")
2202 0 : });
2203 :
2204 : #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
2205 : pub enum RemoteOpKind {
2206 : Upload,
2207 : Download,
2208 : Delete,
2209 : }
2210 : impl RemoteOpKind {
2211 30715 : pub fn as_str(&self) -> &'static str {
2212 30715 : match self {
2213 28878 : Self::Upload => "upload",
2214 136 : Self::Download => "download",
2215 1701 : Self::Delete => "delete",
2216 : }
2217 30715 : }
2218 : }
2219 :
2220 : #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
2221 : pub enum RemoteOpFileKind {
2222 : Layer,
2223 : Index,
2224 : }
2225 : impl RemoteOpFileKind {
2226 30715 : pub fn as_str(&self) -> &'static str {
2227 30715 : match self {
2228 21575 : Self::Layer => "layer",
2229 9140 : Self::Index => "index",
2230 : }
2231 30715 : }
2232 : }
2233 :
2234 400 : pub(crate) static REMOTE_OPERATION_TIME: Lazy<HistogramVec> = Lazy::new(|| {
2235 400 : register_histogram_vec!(
2236 400 : "pageserver_remote_operation_seconds",
2237 400 : "Time spent on remote storage operations. \
2238 400 : Grouped by tenant, timeline, operation_kind and status. \
2239 400 : Does not account for time spent waiting in remote timeline client's queues.",
2240 400 : &["file_kind", "op_kind", "status"]
2241 400 : )
2242 400 : .expect("failed to define a metric")
2243 400 : });
2244 :
2245 0 : pub(crate) static TENANT_TASK_EVENTS: Lazy<IntCounterVec> = Lazy::new(|| {
2246 0 : register_int_counter_vec!(
2247 0 : "pageserver_tenant_task_events",
2248 0 : "Number of task start/stop/fail events.",
2249 0 : &["event"],
2250 0 : )
2251 0 : .expect("Failed to register tenant_task_events metric")
2252 0 : });
2253 :
2254 : pub struct BackgroundLoopSemaphoreMetrics {
2255 : counters: EnumMap<BackgroundLoopKind, IntCounterPair>,
2256 : durations: EnumMap<BackgroundLoopKind, Histogram>,
2257 : waiting_tasks: EnumMap<BackgroundLoopKind, IntGauge>,
2258 : running_tasks: EnumMap<BackgroundLoopKind, IntGauge>,
2259 : }
2260 :
2261 : pub(crate) static BACKGROUND_LOOP_SEMAPHORE: Lazy<BackgroundLoopSemaphoreMetrics> =
2262 40 : Lazy::new(|| {
2263 40 : let counters = register_int_counter_pair_vec!(
2264 40 : "pageserver_background_loop_semaphore_wait_start_count",
2265 40 : "Counter for background loop concurrency-limiting semaphore acquire calls started",
2266 40 : "pageserver_background_loop_semaphore_wait_finish_count",
2267 40 : "Counter for background loop concurrency-limiting semaphore acquire calls finished",
2268 40 : &["task"],
2269 40 : )
2270 40 : .unwrap();
2271 40 :
2272 40 : let durations = register_histogram_vec!(
2273 40 : "pageserver_background_loop_semaphore_wait_seconds",
2274 40 : "Seconds spent waiting on background loop semaphore acquisition",
2275 40 : &["task"],
2276 40 : vec![0.01, 1.0, 5.0, 10.0, 30.0, 60.0, 180.0, 300.0, 600.0],
2277 40 : )
2278 40 : .unwrap();
2279 40 :
2280 40 : let waiting_tasks = register_int_gauge_vec!(
2281 40 : "pageserver_background_loop_semaphore_waiting_tasks",
2282 40 : "Number of background loop tasks waiting for semaphore",
2283 40 : &["task"],
2284 40 : )
2285 40 : .unwrap();
2286 40 :
2287 40 : let running_tasks = register_int_gauge_vec!(
2288 40 : "pageserver_background_loop_semaphore_running_tasks",
2289 40 : "Number of background loop tasks running concurrently",
2290 40 : &["task"],
2291 40 : )
2292 40 : .unwrap();
2293 40 :
2294 40 : BackgroundLoopSemaphoreMetrics {
2295 400 : counters: EnumMap::from_array(std::array::from_fn(|i| {
2296 400 : let kind = BackgroundLoopKind::from_usize(i);
2297 400 : counters.with_label_values(&[kind.into()])
2298 400 : })),
2299 400 : durations: EnumMap::from_array(std::array::from_fn(|i| {
2300 400 : let kind = BackgroundLoopKind::from_usize(i);
2301 400 : durations.with_label_values(&[kind.into()])
2302 400 : })),
2303 400 : waiting_tasks: EnumMap::from_array(std::array::from_fn(|i| {
2304 400 : let kind = BackgroundLoopKind::from_usize(i);
2305 400 : waiting_tasks.with_label_values(&[kind.into()])
2306 400 : })),
2307 400 : running_tasks: EnumMap::from_array(std::array::from_fn(|i| {
2308 400 : let kind = BackgroundLoopKind::from_usize(i);
2309 400 : running_tasks.with_label_values(&[kind.into()])
2310 400 : })),
2311 40 : }
2312 40 : });
2313 :
2314 : impl BackgroundLoopSemaphoreMetrics {
2315 : /// Starts recording semaphore metrics. Call `acquired()` on the returned recorder when the
2316 : /// semaphore is acquired, and drop it when the task completes or is cancelled.
2317 728 : pub(crate) fn record(
2318 728 : &self,
2319 728 : task: BackgroundLoopKind,
2320 728 : ) -> BackgroundLoopSemaphoreMetricsRecorder {
2321 728 : BackgroundLoopSemaphoreMetricsRecorder::start(self, task)
2322 728 : }
2323 : }
2324 :
2325 : /// Records metrics for a background task.
2326 : pub struct BackgroundLoopSemaphoreMetricsRecorder<'a> {
2327 : metrics: &'a BackgroundLoopSemaphoreMetrics,
2328 : task: BackgroundLoopKind,
2329 : start: Instant,
2330 : wait_counter_guard: Option<metrics::IntCounterPairGuard>,
2331 : }
2332 :
2333 : impl<'a> BackgroundLoopSemaphoreMetricsRecorder<'a> {
2334 : /// Starts recording semaphore metrics, by recording wait time and incrementing
2335 : /// `wait_start_count` and `waiting_tasks`.
2336 728 : fn start(metrics: &'a BackgroundLoopSemaphoreMetrics, task: BackgroundLoopKind) -> Self {
2337 728 : metrics.waiting_tasks[task].inc();
2338 728 : Self {
2339 728 : metrics,
2340 728 : task,
2341 728 : start: Instant::now(),
2342 728 : wait_counter_guard: Some(metrics.counters[task].guard()),
2343 728 : }
2344 728 : }
2345 :
2346 : /// Signals that the semaphore has been acquired, and updates relevant metrics.
2347 728 : pub fn acquired(&mut self) -> Duration {
2348 728 : let waited = self.start.elapsed();
2349 728 : self.wait_counter_guard.take().expect("already acquired");
2350 728 : self.metrics.durations[self.task].observe(waited.as_secs_f64());
2351 728 : self.metrics.waiting_tasks[self.task].dec();
2352 728 : self.metrics.running_tasks[self.task].inc();
2353 728 : waited
2354 728 : }
2355 : }
2356 :
2357 : impl Drop for BackgroundLoopSemaphoreMetricsRecorder<'_> {
2358 : /// The task either completed or was cancelled.
2359 728 : fn drop(&mut self) {
2360 728 : if self.wait_counter_guard.take().is_some() {
2361 0 : // Waiting.
2362 0 : self.metrics.durations[self.task].observe(self.start.elapsed().as_secs_f64());
2363 0 : self.metrics.waiting_tasks[self.task].dec();
2364 728 : } else {
2365 728 : // Running.
2366 728 : self.metrics.running_tasks[self.task].dec();
2367 728 : }
2368 728 : }
2369 : }
2370 :
2371 0 : pub(crate) static BACKGROUND_LOOP_PERIOD_OVERRUN_COUNT: Lazy<IntCounterVec> = Lazy::new(|| {
2372 0 : register_int_counter_vec!(
2373 0 : "pageserver_background_loop_period_overrun_count",
2374 0 : "Incremented whenever warn_when_period_overrun() logs a warning.",
2375 0 : &["task", "period"],
2376 0 : )
2377 0 : .expect("failed to define a metric")
2378 0 : });
2379 :
2380 : // walreceiver metrics
2381 :
2382 0 : pub(crate) static WALRECEIVER_STARTED_CONNECTIONS: Lazy<IntCounter> = Lazy::new(|| {
2383 0 : register_int_counter!(
2384 0 : "pageserver_walreceiver_started_connections_total",
2385 0 : "Number of started walreceiver connections"
2386 0 : )
2387 0 : .expect("failed to define a metric")
2388 0 : });
2389 :
2390 0 : pub(crate) static WALRECEIVER_ACTIVE_MANAGERS: Lazy<IntGauge> = Lazy::new(|| {
2391 0 : register_int_gauge!(
2392 0 : "pageserver_walreceiver_active_managers",
2393 0 : "Number of active walreceiver managers"
2394 0 : )
2395 0 : .expect("failed to define a metric")
2396 0 : });
2397 :
2398 0 : pub(crate) static WALRECEIVER_SWITCHES: Lazy<IntCounterVec> = Lazy::new(|| {
2399 0 : register_int_counter_vec!(
2400 0 : "pageserver_walreceiver_switches_total",
2401 0 : "Number of walreceiver manager change_connection calls",
2402 0 : &["reason"]
2403 0 : )
2404 0 : .expect("failed to define a metric")
2405 0 : });
2406 :
2407 0 : pub(crate) static WALRECEIVER_BROKER_UPDATES: Lazy<IntCounter> = Lazy::new(|| {
2408 0 : register_int_counter!(
2409 0 : "pageserver_walreceiver_broker_updates_total",
2410 0 : "Number of received broker updates in walreceiver"
2411 0 : )
2412 0 : .expect("failed to define a metric")
2413 0 : });
2414 :
2415 4 : pub(crate) static WALRECEIVER_CANDIDATES_EVENTS: Lazy<IntCounterVec> = Lazy::new(|| {
2416 4 : register_int_counter_vec!(
2417 4 : "pageserver_walreceiver_candidates_events_total",
2418 4 : "Number of walreceiver candidate events",
2419 4 : &["event"]
2420 4 : )
2421 4 : .expect("failed to define a metric")
2422 4 : });
2423 :
2424 : pub(crate) static WALRECEIVER_CANDIDATES_ADDED: Lazy<IntCounter> =
2425 0 : Lazy::new(|| WALRECEIVER_CANDIDATES_EVENTS.with_label_values(&["add"]));
2426 :
2427 : pub(crate) static WALRECEIVER_CANDIDATES_REMOVED: Lazy<IntCounter> =
2428 4 : Lazy::new(|| WALRECEIVER_CANDIDATES_EVENTS.with_label_values(&["remove"]));
2429 :
2430 : // Metrics collected on WAL redo operations
2431 : //
2432 : // We collect the time spent in actual WAL redo ('redo'), and time waiting
2433 : // for access to the postgres process ('wait') since there is only one for
2434 : // each tenant.
2435 :
2436 : /// Time buckets are small because we want to be able to measure the
2437 : /// smallest redo processing times. These buckets allow us to measure down
2438 : /// to 5us, which equates to 200'000 pages/sec, which equates to 1.6GB/sec.
2439 : /// This is much better than the previous 5ms aka 200 pages/sec aka 1.6MB/sec.
2440 : ///
2441 : /// Values up to 1s are recorded because metrics show that we have redo
2442 : /// durations and lock times larger than 0.250s.
2443 : macro_rules! redo_histogram_time_buckets {
2444 : () => {
2445 : vec![
2446 : 0.000_005, 0.000_010, 0.000_025, 0.000_050, 0.000_100, 0.000_250, 0.000_500, 0.001_000,
2447 : 0.002_500, 0.005_000, 0.010_000, 0.025_000, 0.050_000, 0.100_000, 0.250_000, 0.500_000,
2448 : 1.000_000,
2449 : ]
2450 : };
2451 : }
2452 :
2453 : /// While we're at it, also measure the amount of records replayed in each
2454 : /// operation. We have a global 'total replayed' counter, but that's not
2455 : /// as useful as 'what is the skew for how many records we replay in one
2456 : /// operation'.
2457 : macro_rules! redo_histogram_count_buckets {
2458 : () => {
2459 : vec![0.0, 1.0, 2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0]
2460 : };
2461 : }
2462 :
2463 : macro_rules! redo_bytes_histogram_count_buckets {
2464 : () => {
2465 : // powers of (2^.5), from 2^4.5 to 2^15 (22 buckets)
2466 : // rounded up to the next multiple of 8 to capture any MAXALIGNed record of that size, too.
2467 : vec![
2468 : 24.0, 32.0, 48.0, 64.0, 96.0, 128.0, 184.0, 256.0, 368.0, 512.0, 728.0, 1024.0, 1456.0,
2469 : 2048.0, 2904.0, 4096.0, 5800.0, 8192.0, 11592.0, 16384.0, 23176.0, 32768.0,
2470 : ]
2471 : };
2472 : }
2473 :
2474 : pub(crate) struct WalIngestMetrics {
2475 : pub(crate) bytes_received: IntCounter,
2476 : pub(crate) records_received: IntCounter,
2477 : pub(crate) records_observed: IntCounter,
2478 : pub(crate) records_committed: IntCounter,
2479 : pub(crate) records_filtered: IntCounter,
2480 : pub(crate) values_committed_metadata_images: IntCounter,
2481 : pub(crate) values_committed_metadata_deltas: IntCounter,
2482 : pub(crate) values_committed_data_images: IntCounter,
2483 : pub(crate) values_committed_data_deltas: IntCounter,
2484 : pub(crate) gap_blocks_zeroed_on_rel_extend: IntCounter,
2485 : }
2486 :
2487 : impl WalIngestMetrics {
2488 0 : pub(crate) fn inc_values_committed(&self, stats: &DatadirModificationStats) {
2489 0 : if stats.metadata_images > 0 {
2490 0 : self.values_committed_metadata_images
2491 0 : .inc_by(stats.metadata_images);
2492 0 : }
2493 0 : if stats.metadata_deltas > 0 {
2494 0 : self.values_committed_metadata_deltas
2495 0 : .inc_by(stats.metadata_deltas);
2496 0 : }
2497 0 : if stats.data_images > 0 {
2498 0 : self.values_committed_data_images.inc_by(stats.data_images);
2499 0 : }
2500 0 : if stats.data_deltas > 0 {
2501 0 : self.values_committed_data_deltas.inc_by(stats.data_deltas);
2502 0 : }
2503 0 : }
2504 : }
2505 :
2506 20 : pub(crate) static WAL_INGEST: Lazy<WalIngestMetrics> = Lazy::new(|| {
2507 20 : let values_committed = register_int_counter_vec!(
2508 20 : "pageserver_wal_ingest_values_committed",
2509 20 : "Number of values committed to pageserver storage from WAL records",
2510 20 : &["class", "kind"],
2511 20 : )
2512 20 : .expect("failed to define a metric");
2513 20 :
2514 20 : WalIngestMetrics {
2515 20 : bytes_received: register_int_counter!(
2516 20 : "pageserver_wal_ingest_bytes_received",
2517 20 : "Bytes of WAL ingested from safekeepers",
2518 20 : )
2519 20 : .unwrap(),
2520 20 : records_received: register_int_counter!(
2521 20 : "pageserver_wal_ingest_records_received",
2522 20 : "Number of WAL records received from safekeepers"
2523 20 : )
2524 20 : .expect("failed to define a metric"),
2525 20 : records_observed: register_int_counter!(
2526 20 : "pageserver_wal_ingest_records_observed",
2527 20 : "Number of WAL records observed from safekeepers. These are metadata only records for shard 0."
2528 20 : )
2529 20 : .expect("failed to define a metric"),
2530 20 : records_committed: register_int_counter!(
2531 20 : "pageserver_wal_ingest_records_committed",
2532 20 : "Number of WAL records which resulted in writes to pageserver storage"
2533 20 : )
2534 20 : .expect("failed to define a metric"),
2535 20 : records_filtered: register_int_counter!(
2536 20 : "pageserver_wal_ingest_records_filtered",
2537 20 : "Number of WAL records filtered out due to sharding"
2538 20 : )
2539 20 : .expect("failed to define a metric"),
2540 20 : values_committed_metadata_images: values_committed.with_label_values(&["metadata", "image"]),
2541 20 : values_committed_metadata_deltas: values_committed.with_label_values(&["metadata", "delta"]),
2542 20 : values_committed_data_images: values_committed.with_label_values(&["data", "image"]),
2543 20 : values_committed_data_deltas: values_committed.with_label_values(&["data", "delta"]),
2544 20 : gap_blocks_zeroed_on_rel_extend: register_int_counter!(
2545 20 : "pageserver_gap_blocks_zeroed_on_rel_extend",
2546 20 : "Total number of zero gap blocks written on relation extends"
2547 20 : )
2548 20 : .expect("failed to define a metric"),
2549 20 : }
2550 20 : });
2551 :
2552 412 : pub(crate) static PAGESERVER_TIMELINE_WAL_RECORDS_RECEIVED: Lazy<IntCounterVec> = Lazy::new(|| {
2553 412 : register_int_counter_vec!(
2554 412 : "pageserver_timeline_wal_records_received",
2555 412 : "Number of WAL records received per shard",
2556 412 : &["tenant_id", "shard_id", "timeline_id"]
2557 412 : )
2558 412 : .expect("failed to define a metric")
2559 412 : });
2560 :
2561 12 : pub(crate) static WAL_REDO_TIME: Lazy<Histogram> = Lazy::new(|| {
2562 12 : register_histogram!(
2563 12 : "pageserver_wal_redo_seconds",
2564 12 : "Time spent on WAL redo",
2565 12 : redo_histogram_time_buckets!()
2566 12 : )
2567 12 : .expect("failed to define a metric")
2568 12 : });
2569 :
2570 12 : pub(crate) static WAL_REDO_RECORDS_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
2571 12 : register_histogram!(
2572 12 : "pageserver_wal_redo_records_histogram",
2573 12 : "Histogram of number of records replayed per redo in the Postgres WAL redo process",
2574 12 : redo_histogram_count_buckets!(),
2575 12 : )
2576 12 : .expect("failed to define a metric")
2577 12 : });
2578 :
2579 12 : pub(crate) static WAL_REDO_BYTES_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
2580 12 : register_histogram!(
2581 12 : "pageserver_wal_redo_bytes_histogram",
2582 12 : "Histogram of number of records replayed per redo sent to Postgres",
2583 12 : redo_bytes_histogram_count_buckets!(),
2584 12 : )
2585 12 : .expect("failed to define a metric")
2586 12 : });
2587 :
2588 : // FIXME: isn't this already included by WAL_REDO_RECORDS_HISTOGRAM which has _count?
2589 12 : pub(crate) static WAL_REDO_RECORD_COUNTER: Lazy<IntCounter> = Lazy::new(|| {
2590 12 : register_int_counter!(
2591 12 : "pageserver_replayed_wal_records_total",
2592 12 : "Number of WAL records replayed in WAL redo process"
2593 12 : )
2594 12 : .unwrap()
2595 12 : });
2596 :
2597 : #[rustfmt::skip]
2598 16 : pub(crate) static WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
2599 16 : register_histogram!(
2600 16 : "pageserver_wal_redo_process_launch_duration",
2601 16 : "Histogram of the duration of successful WalRedoProcess::launch calls",
2602 16 : vec![
2603 16 : 0.0002, 0.0004, 0.0006, 0.0008, 0.0010,
2604 16 : 0.0020, 0.0040, 0.0060, 0.0080, 0.0100,
2605 16 : 0.0200, 0.0400, 0.0600, 0.0800, 0.1000,
2606 16 : 0.2000, 0.4000, 0.6000, 0.8000, 1.0000,
2607 16 : 1.5000, 2.0000, 2.5000, 3.0000, 4.0000, 10.0000
2608 16 : ],
2609 16 : )
2610 16 : .expect("failed to define a metric")
2611 16 : });
2612 :
2613 : pub(crate) struct WalRedoProcessCounters {
2614 : pub(crate) started: IntCounter,
2615 : pub(crate) killed_by_cause: EnumMap<WalRedoKillCause, IntCounter>,
2616 : pub(crate) active_stderr_logger_tasks_started: IntCounter,
2617 : pub(crate) active_stderr_logger_tasks_finished: IntCounter,
2618 : }
2619 :
2620 : #[derive(Debug, enum_map::Enum, strum_macros::IntoStaticStr)]
2621 : pub(crate) enum WalRedoKillCause {
2622 : WalRedoProcessDrop,
2623 : NoLeakChildDrop,
2624 : Startup,
2625 : }
2626 :
2627 : impl Default for WalRedoProcessCounters {
2628 16 : fn default() -> Self {
2629 16 : let started = register_int_counter!(
2630 16 : "pageserver_wal_redo_process_started_total",
2631 16 : "Number of WAL redo processes started",
2632 16 : )
2633 16 : .unwrap();
2634 16 :
2635 16 : let killed = register_int_counter_vec!(
2636 16 : "pageserver_wal_redo_process_stopped_total",
2637 16 : "Number of WAL redo processes stopped",
2638 16 : &["cause"],
2639 16 : )
2640 16 : .unwrap();
2641 16 :
2642 16 : let active_stderr_logger_tasks_started = register_int_counter!(
2643 16 : "pageserver_walredo_stderr_logger_tasks_started_total",
2644 16 : "Number of active walredo stderr logger tasks that have started",
2645 16 : )
2646 16 : .unwrap();
2647 16 :
2648 16 : let active_stderr_logger_tasks_finished = register_int_counter!(
2649 16 : "pageserver_walredo_stderr_logger_tasks_finished_total",
2650 16 : "Number of active walredo stderr logger tasks that have finished",
2651 16 : )
2652 16 : .unwrap();
2653 16 :
2654 16 : Self {
2655 16 : started,
2656 48 : killed_by_cause: EnumMap::from_array(std::array::from_fn(|i| {
2657 48 : let cause = WalRedoKillCause::from_usize(i);
2658 48 : let cause_str: &'static str = cause.into();
2659 48 : killed.with_label_values(&[cause_str])
2660 48 : })),
2661 16 : active_stderr_logger_tasks_started,
2662 16 : active_stderr_logger_tasks_finished,
2663 16 : }
2664 16 : }
2665 : }
2666 :
2667 : pub(crate) static WAL_REDO_PROCESS_COUNTERS: Lazy<WalRedoProcessCounters> =
2668 : Lazy::new(WalRedoProcessCounters::default);
2669 :
2670 : /// Similar to `prometheus::HistogramTimer` but does not record on drop.
2671 : pub(crate) struct StorageTimeMetricsTimer {
2672 : metrics: StorageTimeMetrics,
2673 : start: Instant,
2674 : }
2675 :
2676 : impl StorageTimeMetricsTimer {
2677 4260 : fn new(metrics: StorageTimeMetrics) -> Self {
2678 4260 : Self {
2679 4260 : metrics,
2680 4260 : start: Instant::now(),
2681 4260 : }
2682 4260 : }
2683 :
2684 : /// Returns the elapsed duration of the timer.
2685 4260 : pub fn elapsed(&self) -> Duration {
2686 4260 : self.start.elapsed()
2687 4260 : }
2688 :
2689 : /// Record the time from creation to now and return it.
2690 4260 : pub fn stop_and_record(self) -> Duration {
2691 4260 : let duration = self.elapsed();
2692 4260 : let seconds = duration.as_secs_f64();
2693 4260 : self.metrics.timeline_sum.inc_by(seconds);
2694 4260 : self.metrics.timeline_count.inc();
2695 4260 : self.metrics.global_histogram.observe(seconds);
2696 4260 : duration
2697 4260 : }
2698 :
2699 : /// Turns this timer into a timer, which will always record -- usually this means recording
2700 : /// regardless an early `?` path was taken in a function.
2701 8 : pub(crate) fn record_on_drop(self) -> AlwaysRecordingStorageTimeMetricsTimer {
2702 8 : AlwaysRecordingStorageTimeMetricsTimer(Some(self))
2703 8 : }
2704 : }
2705 :
2706 : pub(crate) struct AlwaysRecordingStorageTimeMetricsTimer(Option<StorageTimeMetricsTimer>);
2707 :
2708 : impl Drop for AlwaysRecordingStorageTimeMetricsTimer {
2709 8 : fn drop(&mut self) {
2710 8 : if let Some(inner) = self.0.take() {
2711 8 : inner.stop_and_record();
2712 8 : }
2713 8 : }
2714 : }
2715 :
2716 : impl AlwaysRecordingStorageTimeMetricsTimer {
2717 : /// Returns the elapsed duration of the timer.
2718 0 : pub fn elapsed(&self) -> Duration {
2719 0 : self.0.as_ref().expect("not dropped yet").elapsed()
2720 0 : }
2721 : }
2722 :
2723 : /// Timing facilities for an globally histogrammed metric, which is supported by per tenant and
2724 : /// timeline total sum and count.
2725 : #[derive(Clone, Debug)]
2726 : pub(crate) struct StorageTimeMetrics {
2727 : /// Sum of f64 seconds, per operation, tenant_id and timeline_id
2728 : timeline_sum: Counter,
2729 : /// Number of oeprations, per operation, tenant_id and timeline_id
2730 : timeline_count: IntCounter,
2731 : /// Global histogram having only the "operation" label.
2732 : global_histogram: Histogram,
2733 : }
2734 :
2735 : impl StorageTimeMetrics {
2736 8136 : pub fn new(
2737 8136 : operation: StorageTimeOperation,
2738 8136 : tenant_id: &str,
2739 8136 : shard_id: &str,
2740 8136 : timeline_id: &str,
2741 8136 : ) -> Self {
2742 8136 : let operation: &'static str = operation.into();
2743 8136 :
2744 8136 : let timeline_sum = STORAGE_TIME_SUM_PER_TIMELINE
2745 8136 : .get_metric_with_label_values(&[operation, tenant_id, shard_id, timeline_id])
2746 8136 : .unwrap();
2747 8136 : let timeline_count = STORAGE_TIME_COUNT_PER_TIMELINE
2748 8136 : .get_metric_with_label_values(&[operation, tenant_id, shard_id, timeline_id])
2749 8136 : .unwrap();
2750 8136 : let global_histogram = STORAGE_TIME_GLOBAL
2751 8136 : .get_metric_with_label_values(&[operation])
2752 8136 : .unwrap();
2753 8136 :
2754 8136 : StorageTimeMetrics {
2755 8136 : timeline_sum,
2756 8136 : timeline_count,
2757 8136 : global_histogram,
2758 8136 : }
2759 8136 : }
2760 :
2761 : /// Starts timing a new operation.
2762 : ///
2763 : /// Note: unlike `prometheus::HistogramTimer` the returned timer does not record on drop.
2764 4260 : pub fn start_timer(&self) -> StorageTimeMetricsTimer {
2765 4260 : StorageTimeMetricsTimer::new(self.clone())
2766 4260 : }
2767 : }
2768 :
2769 : #[derive(Debug)]
2770 : pub(crate) struct TimelineMetrics {
2771 : tenant_id: String,
2772 : shard_id: String,
2773 : timeline_id: String,
2774 : pub flush_time_histo: StorageTimeMetrics,
2775 : pub flush_delay_histo: StorageTimeMetrics,
2776 : pub flush_wait_upload_time_gauge: Gauge,
2777 : pub compact_time_histo: StorageTimeMetrics,
2778 : pub create_images_time_histo: StorageTimeMetrics,
2779 : pub logical_size_histo: StorageTimeMetrics,
2780 : pub imitate_logical_size_histo: StorageTimeMetrics,
2781 : pub load_layer_map_histo: StorageTimeMetrics,
2782 : pub garbage_collect_histo: StorageTimeMetrics,
2783 : pub find_gc_cutoffs_histo: StorageTimeMetrics,
2784 : pub last_record_lsn_gauge: IntGauge,
2785 : pub disk_consistent_lsn_gauge: IntGauge,
2786 : pub pitr_history_size: UIntGauge,
2787 : pub archival_size: UIntGauge,
2788 : pub layers_per_read: Histogram,
2789 : pub standby_horizon_gauge: IntGauge,
2790 : pub resident_physical_size_gauge: UIntGauge,
2791 : pub visible_physical_size_gauge: UIntGauge,
2792 : /// copy of LayeredTimeline.current_logical_size
2793 : pub current_logical_size_gauge: UIntGauge,
2794 : pub aux_file_size_gauge: IntGauge,
2795 : pub directory_entries_count_gauge: Lazy<UIntGauge, Box<dyn Send + Fn() -> UIntGauge>>,
2796 : pub evictions: IntCounter,
2797 : pub evictions_with_low_residence_duration: std::sync::RwLock<EvictionsWithLowResidenceDuration>,
2798 : /// Number of valid LSN leases.
2799 : pub valid_lsn_lease_count_gauge: UIntGauge,
2800 : pub wal_records_received: IntCounter,
2801 : shutdown: std::sync::atomic::AtomicBool,
2802 : }
2803 :
2804 : impl TimelineMetrics {
2805 904 : pub fn new(
2806 904 : tenant_shard_id: &TenantShardId,
2807 904 : timeline_id_raw: &TimelineId,
2808 904 : evictions_with_low_residence_duration_builder: EvictionsWithLowResidenceDurationBuilder,
2809 904 : ) -> Self {
2810 904 : let tenant_id = tenant_shard_id.tenant_id.to_string();
2811 904 : let shard_id = format!("{}", tenant_shard_id.shard_slug());
2812 904 : let timeline_id = timeline_id_raw.to_string();
2813 904 : let flush_time_histo = StorageTimeMetrics::new(
2814 904 : StorageTimeOperation::LayerFlush,
2815 904 : &tenant_id,
2816 904 : &shard_id,
2817 904 : &timeline_id,
2818 904 : );
2819 904 : let flush_delay_histo = StorageTimeMetrics::new(
2820 904 : StorageTimeOperation::LayerFlushDelay,
2821 904 : &tenant_id,
2822 904 : &shard_id,
2823 904 : &timeline_id,
2824 904 : );
2825 904 : let flush_wait_upload_time_gauge = FLUSH_WAIT_UPLOAD_TIME
2826 904 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2827 904 : .unwrap();
2828 904 : let compact_time_histo = StorageTimeMetrics::new(
2829 904 : StorageTimeOperation::Compact,
2830 904 : &tenant_id,
2831 904 : &shard_id,
2832 904 : &timeline_id,
2833 904 : );
2834 904 : let create_images_time_histo = StorageTimeMetrics::new(
2835 904 : StorageTimeOperation::CreateImages,
2836 904 : &tenant_id,
2837 904 : &shard_id,
2838 904 : &timeline_id,
2839 904 : );
2840 904 : let logical_size_histo = StorageTimeMetrics::new(
2841 904 : StorageTimeOperation::LogicalSize,
2842 904 : &tenant_id,
2843 904 : &shard_id,
2844 904 : &timeline_id,
2845 904 : );
2846 904 : let imitate_logical_size_histo = StorageTimeMetrics::new(
2847 904 : StorageTimeOperation::ImitateLogicalSize,
2848 904 : &tenant_id,
2849 904 : &shard_id,
2850 904 : &timeline_id,
2851 904 : );
2852 904 : let load_layer_map_histo = StorageTimeMetrics::new(
2853 904 : StorageTimeOperation::LoadLayerMap,
2854 904 : &tenant_id,
2855 904 : &shard_id,
2856 904 : &timeline_id,
2857 904 : );
2858 904 : let garbage_collect_histo = StorageTimeMetrics::new(
2859 904 : StorageTimeOperation::Gc,
2860 904 : &tenant_id,
2861 904 : &shard_id,
2862 904 : &timeline_id,
2863 904 : );
2864 904 : let find_gc_cutoffs_histo = StorageTimeMetrics::new(
2865 904 : StorageTimeOperation::FindGcCutoffs,
2866 904 : &tenant_id,
2867 904 : &shard_id,
2868 904 : &timeline_id,
2869 904 : );
2870 904 : let last_record_lsn_gauge = LAST_RECORD_LSN
2871 904 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2872 904 : .unwrap();
2873 904 :
2874 904 : let disk_consistent_lsn_gauge = DISK_CONSISTENT_LSN
2875 904 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2876 904 : .unwrap();
2877 904 :
2878 904 : let pitr_history_size = PITR_HISTORY_SIZE
2879 904 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2880 904 : .unwrap();
2881 904 :
2882 904 : let archival_size = TIMELINE_ARCHIVE_SIZE
2883 904 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2884 904 : .unwrap();
2885 904 :
2886 904 : let layers_per_read = LAYERS_PER_READ
2887 904 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2888 904 : .unwrap();
2889 904 :
2890 904 : let standby_horizon_gauge = STANDBY_HORIZON
2891 904 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2892 904 : .unwrap();
2893 904 : let resident_physical_size_gauge = RESIDENT_PHYSICAL_SIZE
2894 904 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2895 904 : .unwrap();
2896 904 : let visible_physical_size_gauge = VISIBLE_PHYSICAL_SIZE
2897 904 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2898 904 : .unwrap();
2899 904 : // TODO: we shouldn't expose this metric
2900 904 : let current_logical_size_gauge = CURRENT_LOGICAL_SIZE
2901 904 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2902 904 : .unwrap();
2903 904 : let aux_file_size_gauge = AUX_FILE_SIZE
2904 904 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2905 904 : .unwrap();
2906 904 : // TODO use impl Trait syntax here once we have ability to use it: https://github.com/rust-lang/rust/issues/63065
2907 904 : let directory_entries_count_gauge_closure = {
2908 904 : let tenant_shard_id = *tenant_shard_id;
2909 904 : let timeline_id_raw = *timeline_id_raw;
2910 0 : move || {
2911 0 : let tenant_id = tenant_shard_id.tenant_id.to_string();
2912 0 : let shard_id = format!("{}", tenant_shard_id.shard_slug());
2913 0 : let timeline_id = timeline_id_raw.to_string();
2914 0 : let gauge: UIntGauge = DIRECTORY_ENTRIES_COUNT
2915 0 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2916 0 : .unwrap();
2917 0 : gauge
2918 0 : }
2919 : };
2920 904 : let directory_entries_count_gauge: Lazy<UIntGauge, Box<dyn Send + Fn() -> UIntGauge>> =
2921 904 : Lazy::new(Box::new(directory_entries_count_gauge_closure));
2922 904 : let evictions = EVICTIONS
2923 904 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2924 904 : .unwrap();
2925 904 : let evictions_with_low_residence_duration = evictions_with_low_residence_duration_builder
2926 904 : .build(&tenant_id, &shard_id, &timeline_id);
2927 904 :
2928 904 : let valid_lsn_lease_count_gauge = VALID_LSN_LEASE_COUNT
2929 904 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2930 904 : .unwrap();
2931 904 :
2932 904 : let wal_records_received = PAGESERVER_TIMELINE_WAL_RECORDS_RECEIVED
2933 904 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2934 904 : .unwrap();
2935 904 :
2936 904 : TimelineMetrics {
2937 904 : tenant_id,
2938 904 : shard_id,
2939 904 : timeline_id,
2940 904 : flush_time_histo,
2941 904 : flush_delay_histo,
2942 904 : flush_wait_upload_time_gauge,
2943 904 : compact_time_histo,
2944 904 : create_images_time_histo,
2945 904 : logical_size_histo,
2946 904 : imitate_logical_size_histo,
2947 904 : garbage_collect_histo,
2948 904 : find_gc_cutoffs_histo,
2949 904 : load_layer_map_histo,
2950 904 : last_record_lsn_gauge,
2951 904 : disk_consistent_lsn_gauge,
2952 904 : pitr_history_size,
2953 904 : archival_size,
2954 904 : layers_per_read,
2955 904 : standby_horizon_gauge,
2956 904 : resident_physical_size_gauge,
2957 904 : visible_physical_size_gauge,
2958 904 : current_logical_size_gauge,
2959 904 : aux_file_size_gauge,
2960 904 : directory_entries_count_gauge,
2961 904 : evictions,
2962 904 : evictions_with_low_residence_duration: std::sync::RwLock::new(
2963 904 : evictions_with_low_residence_duration,
2964 904 : ),
2965 904 : valid_lsn_lease_count_gauge,
2966 904 : wal_records_received,
2967 904 : shutdown: std::sync::atomic::AtomicBool::default(),
2968 904 : }
2969 904 : }
2970 :
2971 3156 : pub(crate) fn record_new_file_metrics(&self, sz: u64) {
2972 3156 : self.resident_physical_size_add(sz);
2973 3156 : }
2974 :
2975 1089 : pub(crate) fn resident_physical_size_sub(&self, sz: u64) {
2976 1089 : self.resident_physical_size_gauge.sub(sz);
2977 1089 : crate::metrics::RESIDENT_PHYSICAL_SIZE_GLOBAL.sub(sz);
2978 1089 : }
2979 :
2980 3428 : pub(crate) fn resident_physical_size_add(&self, sz: u64) {
2981 3428 : self.resident_physical_size_gauge.add(sz);
2982 3428 : crate::metrics::RESIDENT_PHYSICAL_SIZE_GLOBAL.add(sz);
2983 3428 : }
2984 :
2985 20 : pub(crate) fn resident_physical_size_get(&self) -> u64 {
2986 20 : self.resident_physical_size_gauge.get()
2987 20 : }
2988 :
2989 2356 : pub(crate) fn flush_wait_upload_time_gauge_add(&self, duration: f64) {
2990 2356 : self.flush_wait_upload_time_gauge.add(duration);
2991 2356 : crate::metrics::FLUSH_WAIT_UPLOAD_TIME
2992 2356 : .get_metric_with_label_values(&[&self.tenant_id, &self.shard_id, &self.timeline_id])
2993 2356 : .unwrap()
2994 2356 : .add(duration);
2995 2356 : }
2996 :
2997 : /// Generates TIMELINE_LAYER labels for a persistent layer.
2998 5245 : fn make_layer_labels(&self, layer_desc: &PersistentLayerDesc) -> [&str; 5] {
2999 5245 : let level = match LayerMap::is_l0(&layer_desc.key_range, layer_desc.is_delta()) {
3000 2848 : true => LayerLevel::L0,
3001 2397 : false => LayerLevel::L1,
3002 : };
3003 5245 : let kind = match layer_desc.is_delta() {
3004 4362 : true => LayerKind::Delta,
3005 883 : false => LayerKind::Image,
3006 : };
3007 5245 : [
3008 5245 : &self.tenant_id,
3009 5245 : &self.shard_id,
3010 5245 : &self.timeline_id,
3011 5245 : level.into(),
3012 5245 : kind.into(),
3013 5245 : ]
3014 5245 : }
3015 :
3016 : /// Generates TIMELINE_LAYER labels for a frozen ephemeral layer.
3017 4712 : fn make_frozen_layer_labels(&self, _layer: &InMemoryLayer) -> [&str; 5] {
3018 4712 : [
3019 4712 : &self.tenant_id,
3020 4712 : &self.shard_id,
3021 4712 : &self.timeline_id,
3022 4712 : LayerLevel::Frozen.into(),
3023 4712 : LayerKind::Delta.into(), // by definition
3024 4712 : ]
3025 4712 : }
3026 :
3027 : /// Removes a frozen ephemeral layer to TIMELINE_LAYER metrics.
3028 2356 : pub fn dec_frozen_layer(&self, layer: &InMemoryLayer) {
3029 2356 : assert!(matches!(layer.info(), InMemoryLayerInfo::Frozen { .. }));
3030 2356 : let labels = self.make_frozen_layer_labels(layer);
3031 2356 : let size = layer.try_len().expect("frozen layer should have no writer");
3032 2356 : TIMELINE_LAYER_COUNT
3033 2356 : .get_metric_with_label_values(&labels)
3034 2356 : .unwrap()
3035 2356 : .dec();
3036 2356 : TIMELINE_LAYER_SIZE
3037 2356 : .get_metric_with_label_values(&labels)
3038 2356 : .unwrap()
3039 2356 : .sub(size);
3040 2356 : }
3041 :
3042 : /// Adds a frozen ephemeral layer to TIMELINE_LAYER metrics.
3043 2356 : pub fn inc_frozen_layer(&self, layer: &InMemoryLayer) {
3044 2356 : assert!(matches!(layer.info(), InMemoryLayerInfo::Frozen { .. }));
3045 2356 : let labels = self.make_frozen_layer_labels(layer);
3046 2356 : let size = layer.try_len().expect("frozen layer should have no writer");
3047 2356 : TIMELINE_LAYER_COUNT
3048 2356 : .get_metric_with_label_values(&labels)
3049 2356 : .unwrap()
3050 2356 : .inc();
3051 2356 : TIMELINE_LAYER_SIZE
3052 2356 : .get_metric_with_label_values(&labels)
3053 2356 : .unwrap()
3054 2356 : .add(size);
3055 2356 : }
3056 :
3057 : /// Removes a persistent layer from TIMELINE_LAYER metrics.
3058 1385 : pub fn dec_layer(&self, layer_desc: &PersistentLayerDesc) {
3059 1385 : let labels = self.make_layer_labels(layer_desc);
3060 1385 : TIMELINE_LAYER_COUNT
3061 1385 : .get_metric_with_label_values(&labels)
3062 1385 : .unwrap()
3063 1385 : .dec();
3064 1385 : TIMELINE_LAYER_SIZE
3065 1385 : .get_metric_with_label_values(&labels)
3066 1385 : .unwrap()
3067 1385 : .sub(layer_desc.file_size);
3068 1385 : }
3069 :
3070 : /// Adds a persistent layer to TIMELINE_LAYER metrics.
3071 3860 : pub fn inc_layer(&self, layer_desc: &PersistentLayerDesc) {
3072 3860 : let labels = self.make_layer_labels(layer_desc);
3073 3860 : TIMELINE_LAYER_COUNT
3074 3860 : .get_metric_with_label_values(&labels)
3075 3860 : .unwrap()
3076 3860 : .inc();
3077 3860 : TIMELINE_LAYER_SIZE
3078 3860 : .get_metric_with_label_values(&labels)
3079 3860 : .unwrap()
3080 3860 : .add(layer_desc.file_size);
3081 3860 : }
3082 :
3083 20 : pub(crate) fn shutdown(&self) {
3084 20 : let was_shutdown = self
3085 20 : .shutdown
3086 20 : .swap(true, std::sync::atomic::Ordering::Relaxed);
3087 20 :
3088 20 : if was_shutdown {
3089 : // this happens on tenant deletion because tenant first shuts down timelines, then
3090 : // invokes timeline deletion which first shuts down the timeline again.
3091 : // TODO: this can be removed once https://github.com/neondatabase/neon/issues/5080
3092 0 : return;
3093 20 : }
3094 20 :
3095 20 : let tenant_id = &self.tenant_id;
3096 20 : let timeline_id = &self.timeline_id;
3097 20 : let shard_id = &self.shard_id;
3098 20 : let _ = LAST_RECORD_LSN.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3099 20 : let _ = DISK_CONSISTENT_LSN.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3100 20 : let _ = FLUSH_WAIT_UPLOAD_TIME.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3101 20 : let _ = STANDBY_HORIZON.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3102 20 : {
3103 20 : RESIDENT_PHYSICAL_SIZE_GLOBAL.sub(self.resident_physical_size_get());
3104 20 : let _ = RESIDENT_PHYSICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3105 20 : }
3106 20 : let _ = VISIBLE_PHYSICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3107 20 : let _ = CURRENT_LOGICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3108 20 : if let Some(metric) = Lazy::get(&DIRECTORY_ENTRIES_COUNT) {
3109 0 : let _ = metric.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3110 20 : }
3111 :
3112 20 : let _ = TIMELINE_ARCHIVE_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3113 20 : let _ = PITR_HISTORY_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3114 :
3115 80 : for ref level in LayerLevel::iter() {
3116 180 : for ref kind in LayerKind::iter() {
3117 120 : let labels: [&str; 5] =
3118 120 : [tenant_id, shard_id, timeline_id, level.into(), kind.into()];
3119 120 : let _ = TIMELINE_LAYER_SIZE.remove_label_values(&labels);
3120 120 : let _ = TIMELINE_LAYER_COUNT.remove_label_values(&labels);
3121 120 : }
3122 : }
3123 :
3124 20 : let _ = LAYERS_PER_READ.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3125 20 :
3126 20 : let _ = EVICTIONS.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3127 20 : let _ = AUX_FILE_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3128 20 : let _ = VALID_LSN_LEASE_COUNT.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3129 20 :
3130 20 : self.evictions_with_low_residence_duration
3131 20 : .write()
3132 20 : .unwrap()
3133 20 : .remove(tenant_id, shard_id, timeline_id);
3134 :
3135 : // The following metrics are born outside of the TimelineMetrics lifecycle but still
3136 : // removed at the end of it. The idea is to have the metrics outlive the
3137 : // entity during which they're observed, e.g., the smgr metrics shall
3138 : // outlive an individual smgr connection, but not the timeline.
3139 :
3140 200 : for op in StorageTimeOperation::VARIANTS {
3141 180 : let _ = STORAGE_TIME_SUM_PER_TIMELINE.remove_label_values(&[
3142 180 : op,
3143 180 : tenant_id,
3144 180 : shard_id,
3145 180 : timeline_id,
3146 180 : ]);
3147 180 : let _ = STORAGE_TIME_COUNT_PER_TIMELINE.remove_label_values(&[
3148 180 : op,
3149 180 : tenant_id,
3150 180 : shard_id,
3151 180 : timeline_id,
3152 180 : ]);
3153 180 : }
3154 :
3155 60 : for op in STORAGE_IO_SIZE_OPERATIONS {
3156 40 : let _ = STORAGE_IO_SIZE.remove_label_values(&[op, tenant_id, shard_id, timeline_id]);
3157 40 : }
3158 :
3159 20 : let _ = SMGR_QUERY_STARTED_PER_TENANT_TIMELINE.remove_label_values(&[
3160 20 : SmgrQueryType::GetPageAtLsn.into(),
3161 20 : tenant_id,
3162 20 : shard_id,
3163 20 : timeline_id,
3164 20 : ]);
3165 20 : let _ = SMGR_QUERY_TIME_PER_TENANT_TIMELINE.remove_label_values(&[
3166 20 : SmgrQueryType::GetPageAtLsn.into(),
3167 20 : tenant_id,
3168 20 : shard_id,
3169 20 : timeline_id,
3170 20 : ]);
3171 20 : let _ = PAGE_SERVICE_BATCH_SIZE_PER_TENANT_TIMELINE.remove_label_values(&[
3172 20 : tenant_id,
3173 20 : shard_id,
3174 20 : timeline_id,
3175 20 : ]);
3176 20 : let _ = PAGESERVER_TIMELINE_WAL_RECORDS_RECEIVED.remove_label_values(&[
3177 20 : tenant_id,
3178 20 : shard_id,
3179 20 : timeline_id,
3180 20 : ]);
3181 20 : let _ = PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS.remove_label_values(&[
3182 20 : tenant_id,
3183 20 : shard_id,
3184 20 : timeline_id,
3185 20 : ]);
3186 20 : let _ = PAGE_SERVICE_SMGR_BATCH_WAIT_TIME.remove_label_values(&[
3187 20 : tenant_id,
3188 20 : shard_id,
3189 20 : timeline_id,
3190 20 : ]);
3191 20 : }
3192 : }
3193 :
3194 12 : pub(crate) fn remove_tenant_metrics(tenant_shard_id: &TenantShardId) {
3195 12 : // Only shard zero deals in synthetic sizes
3196 12 : if tenant_shard_id.is_shard_zero() {
3197 12 : let tid = tenant_shard_id.tenant_id.to_string();
3198 12 : let _ = TENANT_SYNTHETIC_SIZE_METRIC.remove_label_values(&[&tid]);
3199 12 : }
3200 :
3201 12 : tenant_throttling::remove_tenant_metrics(tenant_shard_id);
3202 12 :
3203 12 : // we leave the BROKEN_TENANTS_SET entry if any
3204 12 : }
3205 :
3206 : /// Maintain a per timeline gauge in addition to the global gauge.
3207 : pub(crate) struct PerTimelineRemotePhysicalSizeGauge {
3208 : last_set: AtomicU64,
3209 : gauge: UIntGauge,
3210 : }
3211 :
3212 : impl PerTimelineRemotePhysicalSizeGauge {
3213 924 : fn new(per_timeline_gauge: UIntGauge) -> Self {
3214 924 : Self {
3215 924 : last_set: AtomicU64::new(0),
3216 924 : gauge: per_timeline_gauge,
3217 924 : }
3218 924 : }
3219 3887 : pub(crate) fn set(&self, sz: u64) {
3220 3887 : self.gauge.set(sz);
3221 3887 : let prev = self.last_set.swap(sz, std::sync::atomic::Ordering::Relaxed);
3222 3887 : if sz < prev {
3223 75 : REMOTE_PHYSICAL_SIZE_GLOBAL.sub(prev - sz);
3224 3812 : } else {
3225 3812 : REMOTE_PHYSICAL_SIZE_GLOBAL.add(sz - prev);
3226 3812 : };
3227 3887 : }
3228 4 : pub(crate) fn get(&self) -> u64 {
3229 4 : self.gauge.get()
3230 4 : }
3231 : }
3232 :
3233 : impl Drop for PerTimelineRemotePhysicalSizeGauge {
3234 40 : fn drop(&mut self) {
3235 40 : REMOTE_PHYSICAL_SIZE_GLOBAL.sub(self.last_set.load(std::sync::atomic::Ordering::Relaxed));
3236 40 : }
3237 : }
3238 :
3239 : pub(crate) struct RemoteTimelineClientMetrics {
3240 : tenant_id: String,
3241 : shard_id: String,
3242 : timeline_id: String,
3243 : pub(crate) remote_physical_size_gauge: PerTimelineRemotePhysicalSizeGauge,
3244 : calls: Mutex<HashMap<(&'static str, &'static str), IntCounterPair>>,
3245 : bytes_started_counter: Mutex<HashMap<(&'static str, &'static str), IntCounter>>,
3246 : bytes_finished_counter: Mutex<HashMap<(&'static str, &'static str), IntCounter>>,
3247 : pub(crate) projected_remote_consistent_lsn_gauge: UIntGauge,
3248 : }
3249 :
3250 : impl RemoteTimelineClientMetrics {
3251 924 : pub fn new(tenant_shard_id: &TenantShardId, timeline_id: &TimelineId) -> Self {
3252 924 : let tenant_id_str = tenant_shard_id.tenant_id.to_string();
3253 924 : let shard_id_str = format!("{}", tenant_shard_id.shard_slug());
3254 924 : let timeline_id_str = timeline_id.to_string();
3255 924 :
3256 924 : let remote_physical_size_gauge = PerTimelineRemotePhysicalSizeGauge::new(
3257 924 : REMOTE_PHYSICAL_SIZE
3258 924 : .get_metric_with_label_values(&[&tenant_id_str, &shard_id_str, &timeline_id_str])
3259 924 : .unwrap(),
3260 924 : );
3261 924 :
3262 924 : let projected_remote_consistent_lsn_gauge = PROJECTED_REMOTE_CONSISTENT_LSN
3263 924 : .get_metric_with_label_values(&[&tenant_id_str, &shard_id_str, &timeline_id_str])
3264 924 : .unwrap();
3265 924 :
3266 924 : RemoteTimelineClientMetrics {
3267 924 : tenant_id: tenant_id_str,
3268 924 : shard_id: shard_id_str,
3269 924 : timeline_id: timeline_id_str,
3270 924 : calls: Mutex::new(HashMap::default()),
3271 924 : bytes_started_counter: Mutex::new(HashMap::default()),
3272 924 : bytes_finished_counter: Mutex::new(HashMap::default()),
3273 924 : remote_physical_size_gauge,
3274 924 : projected_remote_consistent_lsn_gauge,
3275 924 : }
3276 924 : }
3277 :
3278 6165 : pub fn remote_operation_time(
3279 6165 : &self,
3280 6165 : file_kind: &RemoteOpFileKind,
3281 6165 : op_kind: &RemoteOpKind,
3282 6165 : status: &'static str,
3283 6165 : ) -> Histogram {
3284 6165 : let key = (file_kind.as_str(), op_kind.as_str(), status);
3285 6165 : REMOTE_OPERATION_TIME
3286 6165 : .get_metric_with_label_values(&[key.0, key.1, key.2])
3287 6165 : .unwrap()
3288 6165 : }
3289 :
3290 14444 : fn calls_counter_pair(
3291 14444 : &self,
3292 14444 : file_kind: &RemoteOpFileKind,
3293 14444 : op_kind: &RemoteOpKind,
3294 14444 : ) -> IntCounterPair {
3295 14444 : let mut guard = self.calls.lock().unwrap();
3296 14444 : let key = (file_kind.as_str(), op_kind.as_str());
3297 14444 : let metric = guard.entry(key).or_insert_with(move || {
3298 1664 : REMOTE_TIMELINE_CLIENT_CALLS
3299 1664 : .get_metric_with_label_values(&[
3300 1664 : &self.tenant_id,
3301 1664 : &self.shard_id,
3302 1664 : &self.timeline_id,
3303 1664 : key.0,
3304 1664 : key.1,
3305 1664 : ])
3306 1664 : .unwrap()
3307 14444 : });
3308 14444 : metric.clone()
3309 14444 : }
3310 :
3311 3484 : fn bytes_started_counter(
3312 3484 : &self,
3313 3484 : file_kind: &RemoteOpFileKind,
3314 3484 : op_kind: &RemoteOpKind,
3315 3484 : ) -> IntCounter {
3316 3484 : let mut guard = self.bytes_started_counter.lock().unwrap();
3317 3484 : let key = (file_kind.as_str(), op_kind.as_str());
3318 3484 : let metric = guard.entry(key).or_insert_with(move || {
3319 656 : REMOTE_TIMELINE_CLIENT_BYTES_STARTED_COUNTER
3320 656 : .get_metric_with_label_values(&[
3321 656 : &self.tenant_id,
3322 656 : &self.shard_id,
3323 656 : &self.timeline_id,
3324 656 : key.0,
3325 656 : key.1,
3326 656 : ])
3327 656 : .unwrap()
3328 3484 : });
3329 3484 : metric.clone()
3330 3484 : }
3331 :
3332 6598 : fn bytes_finished_counter(
3333 6598 : &self,
3334 6598 : file_kind: &RemoteOpFileKind,
3335 6598 : op_kind: &RemoteOpKind,
3336 6598 : ) -> IntCounter {
3337 6598 : let mut guard = self.bytes_finished_counter.lock().unwrap();
3338 6598 : let key = (file_kind.as_str(), op_kind.as_str());
3339 6598 : let metric = guard.entry(key).or_insert_with(move || {
3340 656 : REMOTE_TIMELINE_CLIENT_BYTES_FINISHED_COUNTER
3341 656 : .get_metric_with_label_values(&[
3342 656 : &self.tenant_id,
3343 656 : &self.shard_id,
3344 656 : &self.timeline_id,
3345 656 : key.0,
3346 656 : key.1,
3347 656 : ])
3348 656 : .unwrap()
3349 6598 : });
3350 6598 : metric.clone()
3351 6598 : }
3352 : }
3353 :
3354 : #[cfg(test)]
3355 : impl RemoteTimelineClientMetrics {
3356 12 : pub fn get_bytes_started_counter_value(
3357 12 : &self,
3358 12 : file_kind: &RemoteOpFileKind,
3359 12 : op_kind: &RemoteOpKind,
3360 12 : ) -> Option<u64> {
3361 12 : let guard = self.bytes_started_counter.lock().unwrap();
3362 12 : let key = (file_kind.as_str(), op_kind.as_str());
3363 12 : guard.get(&key).map(|counter| counter.get())
3364 12 : }
3365 :
3366 12 : pub fn get_bytes_finished_counter_value(
3367 12 : &self,
3368 12 : file_kind: &RemoteOpFileKind,
3369 12 : op_kind: &RemoteOpKind,
3370 12 : ) -> Option<u64> {
3371 12 : let guard = self.bytes_finished_counter.lock().unwrap();
3372 12 : let key = (file_kind.as_str(), op_kind.as_str());
3373 12 : guard.get(&key).map(|counter| counter.get())
3374 12 : }
3375 : }
3376 :
3377 : /// See [`RemoteTimelineClientMetrics::call_begin`].
3378 : #[must_use]
3379 : pub(crate) struct RemoteTimelineClientCallMetricGuard {
3380 : /// Decremented on drop.
3381 : calls_counter_pair: Option<IntCounterPair>,
3382 : /// If Some(), this references the bytes_finished metric, and we increment it by the given `u64` on drop.
3383 : bytes_finished: Option<(IntCounter, u64)>,
3384 : }
3385 :
3386 : impl RemoteTimelineClientCallMetricGuard {
3387 : /// Consume this guard object without performing the metric updates it would do on `drop()`.
3388 : /// The caller vouches to do the metric updates manually.
3389 7601 : pub fn will_decrement_manually(mut self) {
3390 7601 : let RemoteTimelineClientCallMetricGuard {
3391 7601 : calls_counter_pair,
3392 7601 : bytes_finished,
3393 7601 : } = &mut self;
3394 7601 : calls_counter_pair.take();
3395 7601 : bytes_finished.take();
3396 7601 : }
3397 : }
3398 :
3399 : impl Drop for RemoteTimelineClientCallMetricGuard {
3400 7669 : fn drop(&mut self) {
3401 7669 : let RemoteTimelineClientCallMetricGuard {
3402 7669 : calls_counter_pair,
3403 7669 : bytes_finished,
3404 7669 : } = self;
3405 7669 : if let Some(guard) = calls_counter_pair.take() {
3406 68 : guard.dec();
3407 7601 : }
3408 7669 : if let Some((bytes_finished_metric, value)) = bytes_finished {
3409 0 : bytes_finished_metric.inc_by(*value);
3410 7669 : }
3411 7669 : }
3412 : }
3413 :
3414 : /// The enum variants communicate to the [`RemoteTimelineClientMetrics`] whether to
3415 : /// track the byte size of this call in applicable metric(s).
3416 : pub(crate) enum RemoteTimelineClientMetricsCallTrackSize {
3417 : /// Do not account for this call's byte size in any metrics.
3418 : /// The `reason` field is there to make the call sites self-documenting
3419 : /// about why they don't need the metric.
3420 : DontTrackSize { reason: &'static str },
3421 : /// Track the byte size of the call in applicable metric(s).
3422 : Bytes(u64),
3423 : }
3424 :
3425 : impl RemoteTimelineClientMetrics {
3426 : /// Update the metrics that change when a call to the remote timeline client instance starts.
3427 : ///
3428 : /// Drop the returned guard object once the operation is finished to updates corresponding metrics that track completions.
3429 : /// Or, use [`RemoteTimelineClientCallMetricGuard::will_decrement_manually`] and [`call_end`](Self::call_end) if that
3430 : /// is more suitable.
3431 : /// Never do both.
3432 7669 : pub(crate) fn call_begin(
3433 7669 : &self,
3434 7669 : file_kind: &RemoteOpFileKind,
3435 7669 : op_kind: &RemoteOpKind,
3436 7669 : size: RemoteTimelineClientMetricsCallTrackSize,
3437 7669 : ) -> RemoteTimelineClientCallMetricGuard {
3438 7669 : let calls_counter_pair = self.calls_counter_pair(file_kind, op_kind);
3439 7669 : calls_counter_pair.inc();
3440 :
3441 7669 : let bytes_finished = match size {
3442 4185 : RemoteTimelineClientMetricsCallTrackSize::DontTrackSize { reason: _reason } => {
3443 4185 : // nothing to do
3444 4185 : None
3445 : }
3446 3484 : RemoteTimelineClientMetricsCallTrackSize::Bytes(size) => {
3447 3484 : self.bytes_started_counter(file_kind, op_kind).inc_by(size);
3448 3484 : let finished_counter = self.bytes_finished_counter(file_kind, op_kind);
3449 3484 : Some((finished_counter, size))
3450 : }
3451 : };
3452 7669 : RemoteTimelineClientCallMetricGuard {
3453 7669 : calls_counter_pair: Some(calls_counter_pair),
3454 7669 : bytes_finished,
3455 7669 : }
3456 7669 : }
3457 :
3458 : /// Manually udpate the metrics that track completions, instead of using the guard object.
3459 : /// Using the guard object is generally preferable.
3460 : /// See [`call_begin`](Self::call_begin) for more context.
3461 6775 : pub(crate) fn call_end(
3462 6775 : &self,
3463 6775 : file_kind: &RemoteOpFileKind,
3464 6775 : op_kind: &RemoteOpKind,
3465 6775 : size: RemoteTimelineClientMetricsCallTrackSize,
3466 6775 : ) {
3467 6775 : let calls_counter_pair = self.calls_counter_pair(file_kind, op_kind);
3468 6775 : calls_counter_pair.dec();
3469 6775 : match size {
3470 3661 : RemoteTimelineClientMetricsCallTrackSize::DontTrackSize { reason: _reason } => {}
3471 3114 : RemoteTimelineClientMetricsCallTrackSize::Bytes(size) => {
3472 3114 : self.bytes_finished_counter(file_kind, op_kind).inc_by(size);
3473 3114 : }
3474 : }
3475 6775 : }
3476 : }
3477 :
3478 : impl Drop for RemoteTimelineClientMetrics {
3479 40 : fn drop(&mut self) {
3480 40 : let RemoteTimelineClientMetrics {
3481 40 : tenant_id,
3482 40 : shard_id,
3483 40 : timeline_id,
3484 40 : remote_physical_size_gauge,
3485 40 : calls,
3486 40 : bytes_started_counter,
3487 40 : bytes_finished_counter,
3488 40 : projected_remote_consistent_lsn_gauge,
3489 40 : } = self;
3490 48 : for ((a, b), _) in calls.get_mut().unwrap().drain() {
3491 48 : let mut res = [Ok(()), Ok(())];
3492 48 : REMOTE_TIMELINE_CLIENT_CALLS
3493 48 : .remove_label_values(&mut res, &[tenant_id, shard_id, timeline_id, a, b]);
3494 48 : // don't care about results
3495 48 : }
3496 40 : for ((a, b), _) in bytes_started_counter.get_mut().unwrap().drain() {
3497 12 : let _ = REMOTE_TIMELINE_CLIENT_BYTES_STARTED_COUNTER.remove_label_values(&[
3498 12 : tenant_id,
3499 12 : shard_id,
3500 12 : timeline_id,
3501 12 : a,
3502 12 : b,
3503 12 : ]);
3504 12 : }
3505 40 : for ((a, b), _) in bytes_finished_counter.get_mut().unwrap().drain() {
3506 12 : let _ = REMOTE_TIMELINE_CLIENT_BYTES_FINISHED_COUNTER.remove_label_values(&[
3507 12 : tenant_id,
3508 12 : shard_id,
3509 12 : timeline_id,
3510 12 : a,
3511 12 : b,
3512 12 : ]);
3513 12 : }
3514 40 : {
3515 40 : let _ = remote_physical_size_gauge; // use to avoid 'unused' warning in desctructuring above
3516 40 : let _ = REMOTE_PHYSICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3517 40 : }
3518 40 : {
3519 40 : let _ = projected_remote_consistent_lsn_gauge;
3520 40 : let _ = PROJECTED_REMOTE_CONSISTENT_LSN.remove_label_values(&[
3521 40 : tenant_id,
3522 40 : shard_id,
3523 40 : timeline_id,
3524 40 : ]);
3525 40 : }
3526 40 : }
3527 : }
3528 :
3529 : /// Wrapper future that measures the time spent by a remote storage operation,
3530 : /// and records the time and success/failure as a prometheus metric.
3531 : pub(crate) trait MeasureRemoteOp: Sized {
3532 6431 : fn measure_remote_op(
3533 6431 : self,
3534 6431 : file_kind: RemoteOpFileKind,
3535 6431 : op: RemoteOpKind,
3536 6431 : metrics: Arc<RemoteTimelineClientMetrics>,
3537 6431 : ) -> MeasuredRemoteOp<Self> {
3538 6431 : let start = Instant::now();
3539 6431 : MeasuredRemoteOp {
3540 6431 : inner: self,
3541 6431 : file_kind,
3542 6431 : op,
3543 6431 : start,
3544 6431 : metrics,
3545 6431 : }
3546 6431 : }
3547 : }
3548 :
3549 : impl<T: Sized> MeasureRemoteOp for T {}
3550 :
3551 : pin_project! {
3552 : pub(crate) struct MeasuredRemoteOp<F>
3553 : {
3554 : #[pin]
3555 : inner: F,
3556 : file_kind: RemoteOpFileKind,
3557 : op: RemoteOpKind,
3558 : start: Instant,
3559 : metrics: Arc<RemoteTimelineClientMetrics>,
3560 : }
3561 : }
3562 :
3563 : impl<F: Future<Output = Result<O, E>>, O, E> Future for MeasuredRemoteOp<F> {
3564 : type Output = Result<O, E>;
3565 :
3566 96370 : fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
3567 96370 : let this = self.project();
3568 96370 : let poll_result = this.inner.poll(cx);
3569 96370 : if let Poll::Ready(ref res) = poll_result {
3570 6165 : let duration = this.start.elapsed();
3571 6165 : let status = if res.is_ok() { &"success" } else { &"failure" };
3572 6165 : this.metrics
3573 6165 : .remote_operation_time(this.file_kind, this.op, status)
3574 6165 : .observe(duration.as_secs_f64());
3575 90205 : }
3576 96370 : poll_result
3577 96370 : }
3578 : }
3579 :
3580 : pub mod tokio_epoll_uring {
3581 : use std::collections::HashMap;
3582 : use std::sync::{Arc, Mutex};
3583 :
3584 : use metrics::{Histogram, LocalHistogram, UIntGauge, register_histogram, register_int_counter};
3585 : use once_cell::sync::Lazy;
3586 :
3587 : /// Shared storage for tokio-epoll-uring thread local metrics.
3588 : pub(crate) static THREAD_LOCAL_METRICS_STORAGE: Lazy<ThreadLocalMetricsStorage> =
3589 238 : Lazy::new(|| {
3590 238 : let slots_submission_queue_depth = register_histogram!(
3591 238 : "pageserver_tokio_epoll_uring_slots_submission_queue_depth",
3592 238 : "The slots waiters queue depth of each tokio_epoll_uring system",
3593 238 : vec![
3594 238 : 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0
3595 238 : ],
3596 238 : )
3597 238 : .expect("failed to define a metric");
3598 238 : ThreadLocalMetricsStorage {
3599 238 : observers: Mutex::new(HashMap::new()),
3600 238 : slots_submission_queue_depth,
3601 238 : }
3602 238 : });
3603 :
3604 : pub struct ThreadLocalMetricsStorage {
3605 : /// List of thread local metrics observers.
3606 : observers: Mutex<HashMap<u64, Arc<ThreadLocalMetrics>>>,
3607 : /// A histogram shared between all thread local systems
3608 : /// for collecting slots submission queue depth.
3609 : slots_submission_queue_depth: Histogram,
3610 : }
3611 :
3612 : /// Each thread-local [`tokio_epoll_uring::System`] gets one of these as its
3613 : /// [`tokio_epoll_uring::metrics::PerSystemMetrics`] generic.
3614 : ///
3615 : /// The System makes observations into [`Self`] and periodically, the collector
3616 : /// comes along and flushes [`Self`] into the shared storage [`THREAD_LOCAL_METRICS_STORAGE`].
3617 : ///
3618 : /// [`LocalHistogram`] is `!Send`, so, we need to put it behind a [`Mutex`].
3619 : /// But except for the periodic flush, the lock is uncontended so there's no waiting
3620 : /// for cache coherence protocol to get an exclusive cache line.
3621 : pub struct ThreadLocalMetrics {
3622 : /// Local observer of thread local tokio-epoll-uring system's slots waiters queue depth.
3623 : slots_submission_queue_depth: Mutex<LocalHistogram>,
3624 : }
3625 :
3626 : impl ThreadLocalMetricsStorage {
3627 : /// Registers a new thread local system. Returns a thread local metrics observer.
3628 1043 : pub fn register_system(&self, id: u64) -> Arc<ThreadLocalMetrics> {
3629 1043 : let per_system_metrics = Arc::new(ThreadLocalMetrics::new(
3630 1043 : self.slots_submission_queue_depth.local(),
3631 1043 : ));
3632 1043 : let mut g = self.observers.lock().unwrap();
3633 1043 : g.insert(id, Arc::clone(&per_system_metrics));
3634 1043 : per_system_metrics
3635 1043 : }
3636 :
3637 : /// Removes metrics observer for a thread local system.
3638 : /// This should be called before dropping a thread local system.
3639 238 : pub fn remove_system(&self, id: u64) {
3640 238 : let mut g = self.observers.lock().unwrap();
3641 238 : g.remove(&id);
3642 238 : }
3643 :
3644 : /// Flush all thread local metrics to the shared storage.
3645 0 : pub fn flush_thread_local_metrics(&self) {
3646 0 : let g = self.observers.lock().unwrap();
3647 0 : g.values().for_each(|local| {
3648 0 : local.flush();
3649 0 : });
3650 0 : }
3651 : }
3652 :
3653 : impl ThreadLocalMetrics {
3654 1043 : pub fn new(slots_submission_queue_depth: LocalHistogram) -> Self {
3655 1043 : ThreadLocalMetrics {
3656 1043 : slots_submission_queue_depth: Mutex::new(slots_submission_queue_depth),
3657 1043 : }
3658 1043 : }
3659 :
3660 : /// Flushes the thread local metrics to shared aggregator.
3661 0 : pub fn flush(&self) {
3662 0 : let Self {
3663 0 : slots_submission_queue_depth,
3664 0 : } = self;
3665 0 : slots_submission_queue_depth.lock().unwrap().flush();
3666 0 : }
3667 : }
3668 :
3669 : impl tokio_epoll_uring::metrics::PerSystemMetrics for ThreadLocalMetrics {
3670 1818746 : fn observe_slots_submission_queue_depth(&self, queue_depth: u64) {
3671 1818746 : let Self {
3672 1818746 : slots_submission_queue_depth,
3673 1818746 : } = self;
3674 1818746 : slots_submission_queue_depth
3675 1818746 : .lock()
3676 1818746 : .unwrap()
3677 1818746 : .observe(queue_depth as f64);
3678 1818746 : }
3679 : }
3680 :
3681 : pub struct Collector {
3682 : descs: Vec<metrics::core::Desc>,
3683 : systems_created: UIntGauge,
3684 : systems_destroyed: UIntGauge,
3685 : thread_local_metrics_storage: &'static ThreadLocalMetricsStorage,
3686 : }
3687 :
3688 : impl metrics::core::Collector for Collector {
3689 0 : fn desc(&self) -> Vec<&metrics::core::Desc> {
3690 0 : self.descs.iter().collect()
3691 0 : }
3692 :
3693 0 : fn collect(&self) -> Vec<metrics::proto::MetricFamily> {
3694 0 : let mut mfs = Vec::with_capacity(Self::NMETRICS);
3695 0 : let tokio_epoll_uring::metrics::GlobalMetrics {
3696 0 : systems_created,
3697 0 : systems_destroyed,
3698 0 : } = tokio_epoll_uring::metrics::global();
3699 0 : self.systems_created.set(systems_created);
3700 0 : mfs.extend(self.systems_created.collect());
3701 0 : self.systems_destroyed.set(systems_destroyed);
3702 0 : mfs.extend(self.systems_destroyed.collect());
3703 0 :
3704 0 : self.thread_local_metrics_storage
3705 0 : .flush_thread_local_metrics();
3706 0 :
3707 0 : mfs.extend(
3708 0 : self.thread_local_metrics_storage
3709 0 : .slots_submission_queue_depth
3710 0 : .collect(),
3711 0 : );
3712 0 : mfs
3713 0 : }
3714 : }
3715 :
3716 : impl Collector {
3717 : const NMETRICS: usize = 3;
3718 :
3719 : #[allow(clippy::new_without_default)]
3720 0 : pub fn new() -> Self {
3721 0 : let mut descs = Vec::new();
3722 0 :
3723 0 : let systems_created = UIntGauge::new(
3724 0 : "pageserver_tokio_epoll_uring_systems_created",
3725 0 : "counter of tokio-epoll-uring systems that were created",
3726 0 : )
3727 0 : .unwrap();
3728 0 : descs.extend(
3729 0 : metrics::core::Collector::desc(&systems_created)
3730 0 : .into_iter()
3731 0 : .cloned(),
3732 0 : );
3733 0 :
3734 0 : let systems_destroyed = UIntGauge::new(
3735 0 : "pageserver_tokio_epoll_uring_systems_destroyed",
3736 0 : "counter of tokio-epoll-uring systems that were destroyed",
3737 0 : )
3738 0 : .unwrap();
3739 0 : descs.extend(
3740 0 : metrics::core::Collector::desc(&systems_destroyed)
3741 0 : .into_iter()
3742 0 : .cloned(),
3743 0 : );
3744 0 :
3745 0 : Self {
3746 0 : descs,
3747 0 : systems_created,
3748 0 : systems_destroyed,
3749 0 : thread_local_metrics_storage: &THREAD_LOCAL_METRICS_STORAGE,
3750 0 : }
3751 0 : }
3752 : }
3753 :
3754 238 : pub(crate) static THREAD_LOCAL_LAUNCH_SUCCESSES: Lazy<metrics::IntCounter> = Lazy::new(|| {
3755 238 : register_int_counter!(
3756 238 : "pageserver_tokio_epoll_uring_pageserver_thread_local_launch_success_count",
3757 238 : "Number of times where thread_local_system creation spanned multiple executor threads",
3758 238 : )
3759 238 : .unwrap()
3760 238 : });
3761 :
3762 0 : pub(crate) static THREAD_LOCAL_LAUNCH_FAILURES: Lazy<metrics::IntCounter> = Lazy::new(|| {
3763 0 : register_int_counter!(
3764 0 : "pageserver_tokio_epoll_uring_pageserver_thread_local_launch_failures_count",
3765 0 : "Number of times thread_local_system creation failed and was retried after back-off.",
3766 0 : )
3767 0 : .unwrap()
3768 0 : });
3769 : }
3770 :
3771 : pub(crate) mod tenant_throttling {
3772 : use metrics::{IntCounter, register_int_counter_vec};
3773 : use once_cell::sync::Lazy;
3774 : use utils::shard::TenantShardId;
3775 :
3776 : pub(crate) struct GlobalAndPerTenantIntCounter {
3777 : global: IntCounter,
3778 : per_tenant: IntCounter,
3779 : }
3780 :
3781 : impl GlobalAndPerTenantIntCounter {
3782 : #[inline(always)]
3783 0 : pub(crate) fn inc(&self) {
3784 0 : self.inc_by(1)
3785 0 : }
3786 : #[inline(always)]
3787 0 : pub(crate) fn inc_by(&self, n: u64) {
3788 0 : self.global.inc_by(n);
3789 0 : self.per_tenant.inc_by(n);
3790 0 : }
3791 : }
3792 :
3793 : pub(crate) struct Metrics<const KIND: usize> {
3794 : pub(super) count_accounted_start: GlobalAndPerTenantIntCounter,
3795 : pub(super) count_accounted_finish: GlobalAndPerTenantIntCounter,
3796 : pub(super) wait_time: GlobalAndPerTenantIntCounter,
3797 : pub(super) count_throttled: GlobalAndPerTenantIntCounter,
3798 : }
3799 :
3800 416 : static COUNT_ACCOUNTED_START: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3801 416 : register_int_counter_vec!(
3802 416 : "pageserver_tenant_throttling_count_accounted_start_global",
3803 416 : "Count of tenant throttling starts, by kind of throttle.",
3804 416 : &["kind"]
3805 416 : )
3806 416 : .unwrap()
3807 416 : });
3808 416 : static COUNT_ACCOUNTED_START_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3809 416 : register_int_counter_vec!(
3810 416 : "pageserver_tenant_throttling_count_accounted_start",
3811 416 : "Count of tenant throttling starts, by kind of throttle.",
3812 416 : &["kind", "tenant_id", "shard_id"]
3813 416 : )
3814 416 : .unwrap()
3815 416 : });
3816 416 : static COUNT_ACCOUNTED_FINISH: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3817 416 : register_int_counter_vec!(
3818 416 : "pageserver_tenant_throttling_count_accounted_finish_global",
3819 416 : "Count of tenant throttling finishes, by kind of throttle.",
3820 416 : &["kind"]
3821 416 : )
3822 416 : .unwrap()
3823 416 : });
3824 416 : static COUNT_ACCOUNTED_FINISH_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3825 416 : register_int_counter_vec!(
3826 416 : "pageserver_tenant_throttling_count_accounted_finish",
3827 416 : "Count of tenant throttling finishes, by kind of throttle.",
3828 416 : &["kind", "tenant_id", "shard_id"]
3829 416 : )
3830 416 : .unwrap()
3831 416 : });
3832 416 : static WAIT_USECS: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3833 416 : register_int_counter_vec!(
3834 416 : "pageserver_tenant_throttling_wait_usecs_sum_global",
3835 416 : "Sum of microseconds that spent waiting throttle by kind of throttle.",
3836 416 : &["kind"]
3837 416 : )
3838 416 : .unwrap()
3839 416 : });
3840 416 : static WAIT_USECS_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3841 416 : register_int_counter_vec!(
3842 416 : "pageserver_tenant_throttling_wait_usecs_sum",
3843 416 : "Sum of microseconds that spent waiting throttle by kind of throttle.",
3844 416 : &["kind", "tenant_id", "shard_id"]
3845 416 : )
3846 416 : .unwrap()
3847 416 : });
3848 :
3849 416 : static WAIT_COUNT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3850 416 : register_int_counter_vec!(
3851 416 : "pageserver_tenant_throttling_count_global",
3852 416 : "Count of tenant throttlings, by kind of throttle.",
3853 416 : &["kind"]
3854 416 : )
3855 416 : .unwrap()
3856 416 : });
3857 416 : static WAIT_COUNT_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3858 416 : register_int_counter_vec!(
3859 416 : "pageserver_tenant_throttling_count",
3860 416 : "Count of tenant throttlings, by kind of throttle.",
3861 416 : &["kind", "tenant_id", "shard_id"]
3862 416 : )
3863 416 : .unwrap()
3864 416 : });
3865 :
3866 : const KINDS: &[&str] = &["pagestream"];
3867 : pub type Pagestream = Metrics<0>;
3868 :
3869 : impl<const KIND: usize> Metrics<KIND> {
3870 452 : pub(crate) fn new(tenant_shard_id: &TenantShardId) -> Self {
3871 452 : let per_tenant_label_values = &[
3872 452 : KINDS[KIND],
3873 452 : &tenant_shard_id.tenant_id.to_string(),
3874 452 : &tenant_shard_id.shard_slug().to_string(),
3875 452 : ];
3876 452 : Metrics {
3877 452 : count_accounted_start: {
3878 452 : GlobalAndPerTenantIntCounter {
3879 452 : global: COUNT_ACCOUNTED_START.with_label_values(&[KINDS[KIND]]),
3880 452 : per_tenant: COUNT_ACCOUNTED_START_PER_TENANT
3881 452 : .with_label_values(per_tenant_label_values),
3882 452 : }
3883 452 : },
3884 452 : count_accounted_finish: {
3885 452 : GlobalAndPerTenantIntCounter {
3886 452 : global: COUNT_ACCOUNTED_FINISH.with_label_values(&[KINDS[KIND]]),
3887 452 : per_tenant: COUNT_ACCOUNTED_FINISH_PER_TENANT
3888 452 : .with_label_values(per_tenant_label_values),
3889 452 : }
3890 452 : },
3891 452 : wait_time: {
3892 452 : GlobalAndPerTenantIntCounter {
3893 452 : global: WAIT_USECS.with_label_values(&[KINDS[KIND]]),
3894 452 : per_tenant: WAIT_USECS_PER_TENANT
3895 452 : .with_label_values(per_tenant_label_values),
3896 452 : }
3897 452 : },
3898 452 : count_throttled: {
3899 452 : GlobalAndPerTenantIntCounter {
3900 452 : global: WAIT_COUNT.with_label_values(&[KINDS[KIND]]),
3901 452 : per_tenant: WAIT_COUNT_PER_TENANT
3902 452 : .with_label_values(per_tenant_label_values),
3903 452 : }
3904 452 : },
3905 452 : }
3906 452 : }
3907 : }
3908 :
3909 0 : pub(crate) fn preinitialize_global_metrics() {
3910 0 : Lazy::force(&COUNT_ACCOUNTED_START);
3911 0 : Lazy::force(&COUNT_ACCOUNTED_FINISH);
3912 0 : Lazy::force(&WAIT_USECS);
3913 0 : Lazy::force(&WAIT_COUNT);
3914 0 : }
3915 :
3916 12 : pub(crate) fn remove_tenant_metrics(tenant_shard_id: &TenantShardId) {
3917 48 : for m in &[
3918 12 : &COUNT_ACCOUNTED_START_PER_TENANT,
3919 12 : &COUNT_ACCOUNTED_FINISH_PER_TENANT,
3920 12 : &WAIT_USECS_PER_TENANT,
3921 12 : &WAIT_COUNT_PER_TENANT,
3922 12 : ] {
3923 96 : for kind in KINDS {
3924 48 : let _ = m.remove_label_values(&[
3925 48 : kind,
3926 48 : &tenant_shard_id.tenant_id.to_string(),
3927 48 : &tenant_shard_id.shard_slug().to_string(),
3928 48 : ]);
3929 48 : }
3930 : }
3931 12 : }
3932 : }
3933 :
3934 : pub(crate) mod disk_usage_based_eviction {
3935 : use super::*;
3936 :
3937 : pub(crate) struct Metrics {
3938 : pub(crate) tenant_collection_time: Histogram,
3939 : pub(crate) tenant_layer_count: Histogram,
3940 : pub(crate) layers_collected: IntCounter,
3941 : pub(crate) layers_selected: IntCounter,
3942 : pub(crate) layers_evicted: IntCounter,
3943 : }
3944 :
3945 : impl Default for Metrics {
3946 0 : fn default() -> Self {
3947 0 : let tenant_collection_time = register_histogram!(
3948 0 : "pageserver_disk_usage_based_eviction_tenant_collection_seconds",
3949 0 : "Time spent collecting layers from a tenant -- not normalized by collected layer amount",
3950 0 : vec![0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1.0, 5.0, 10.0]
3951 0 : )
3952 0 : .unwrap();
3953 0 :
3954 0 : let tenant_layer_count = register_histogram!(
3955 0 : "pageserver_disk_usage_based_eviction_tenant_collected_layers",
3956 0 : "Amount of layers gathered from a tenant",
3957 0 : vec![5.0, 50.0, 500.0, 5000.0, 50000.0]
3958 0 : )
3959 0 : .unwrap();
3960 0 :
3961 0 : let layers_collected = register_int_counter!(
3962 0 : "pageserver_disk_usage_based_eviction_collected_layers_total",
3963 0 : "Amount of layers collected"
3964 0 : )
3965 0 : .unwrap();
3966 0 :
3967 0 : let layers_selected = register_int_counter!(
3968 0 : "pageserver_disk_usage_based_eviction_select_layers_total",
3969 0 : "Amount of layers selected"
3970 0 : )
3971 0 : .unwrap();
3972 0 :
3973 0 : let layers_evicted = register_int_counter!(
3974 0 : "pageserver_disk_usage_based_eviction_evicted_layers_total",
3975 0 : "Amount of layers successfully evicted"
3976 0 : )
3977 0 : .unwrap();
3978 0 :
3979 0 : Self {
3980 0 : tenant_collection_time,
3981 0 : tenant_layer_count,
3982 0 : layers_collected,
3983 0 : layers_selected,
3984 0 : layers_evicted,
3985 0 : }
3986 0 : }
3987 : }
3988 :
3989 : pub(crate) static METRICS: Lazy<Metrics> = Lazy::new(Metrics::default);
3990 : }
3991 :
3992 404 : static TOKIO_EXECUTOR_THREAD_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
3993 404 : register_uint_gauge_vec!(
3994 404 : "pageserver_tokio_executor_thread_configured_count",
3995 404 : "Total number of configued tokio executor threads in the process.
3996 404 : The `setup` label denotes whether we're running with multiple runtimes or a single runtime.",
3997 404 : &["setup"],
3998 404 : )
3999 404 : .unwrap()
4000 404 : });
4001 :
4002 404 : pub(crate) fn set_tokio_runtime_setup(setup: &str, num_threads: NonZeroUsize) {
4003 : static SERIALIZE: std::sync::Mutex<()> = std::sync::Mutex::new(());
4004 404 : let _guard = SERIALIZE.lock().unwrap();
4005 404 : TOKIO_EXECUTOR_THREAD_COUNT.reset();
4006 404 : TOKIO_EXECUTOR_THREAD_COUNT
4007 404 : .get_metric_with_label_values(&[setup])
4008 404 : .unwrap()
4009 404 : .set(u64::try_from(num_threads.get()).unwrap());
4010 404 : }
4011 :
4012 0 : pub fn preinitialize_metrics(conf: &'static PageServerConf) {
4013 0 : set_page_service_config_max_batch_size(&conf.page_service_pipelining);
4014 0 :
4015 0 : // Python tests need these and on some we do alerting.
4016 0 : //
4017 0 : // FIXME(4813): make it so that we have no top level metrics as this fn will easily fall out of
4018 0 : // order:
4019 0 : // - global metrics reside in a Lazy<PageserverMetrics>
4020 0 : // - access via crate::metrics::PS_METRICS.some_metric.inc()
4021 0 : // - could move the statics into TimelineMetrics::new()?
4022 0 :
4023 0 : // counters
4024 0 : [
4025 0 : &UNEXPECTED_ONDEMAND_DOWNLOADS,
4026 0 : &WALRECEIVER_STARTED_CONNECTIONS,
4027 0 : &WALRECEIVER_BROKER_UPDATES,
4028 0 : &WALRECEIVER_CANDIDATES_ADDED,
4029 0 : &WALRECEIVER_CANDIDATES_REMOVED,
4030 0 : &tokio_epoll_uring::THREAD_LOCAL_LAUNCH_FAILURES,
4031 0 : &tokio_epoll_uring::THREAD_LOCAL_LAUNCH_SUCCESSES,
4032 0 : &REMOTE_ONDEMAND_DOWNLOADED_LAYERS,
4033 0 : &REMOTE_ONDEMAND_DOWNLOADED_BYTES,
4034 0 : &CIRCUIT_BREAKERS_BROKEN,
4035 0 : &CIRCUIT_BREAKERS_UNBROKEN,
4036 0 : &PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS_GLOBAL,
4037 0 : ]
4038 0 : .into_iter()
4039 0 : .for_each(|c| {
4040 0 : Lazy::force(c);
4041 0 : });
4042 0 :
4043 0 : // Deletion queue stats
4044 0 : Lazy::force(&DELETION_QUEUE);
4045 0 :
4046 0 : // Tenant stats
4047 0 : Lazy::force(&TENANT);
4048 0 :
4049 0 : // Tenant manager stats
4050 0 : Lazy::force(&TENANT_MANAGER);
4051 0 :
4052 0 : Lazy::force(&crate::tenant::storage_layer::layer::LAYER_IMPL_METRICS);
4053 0 : Lazy::force(&disk_usage_based_eviction::METRICS);
4054 :
4055 0 : for state_name in pageserver_api::models::TenantState::VARIANTS {
4056 0 : // initialize the metric for all gauges, otherwise the time series might seemingly show
4057 0 : // values from last restart.
4058 0 : TENANT_STATE_METRIC.with_label_values(&[state_name]).set(0);
4059 0 : }
4060 :
4061 : // countervecs
4062 0 : [
4063 0 : &BACKGROUND_LOOP_PERIOD_OVERRUN_COUNT,
4064 0 : &SMGR_QUERY_STARTED_GLOBAL,
4065 0 : ]
4066 0 : .into_iter()
4067 0 : .for_each(|c| {
4068 0 : Lazy::force(c);
4069 0 : });
4070 0 :
4071 0 : // gauges
4072 0 : WALRECEIVER_ACTIVE_MANAGERS.get();
4073 0 :
4074 0 : // histograms
4075 0 : [
4076 0 : &LAYERS_PER_READ_GLOBAL,
4077 0 : &DELTAS_PER_READ_GLOBAL,
4078 0 : &WAIT_LSN_TIME,
4079 0 : &WAL_REDO_TIME,
4080 0 : &WAL_REDO_RECORDS_HISTOGRAM,
4081 0 : &WAL_REDO_BYTES_HISTOGRAM,
4082 0 : &WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM,
4083 0 : &PAGE_SERVICE_BATCH_SIZE_GLOBAL,
4084 0 : &PAGE_SERVICE_SMGR_BATCH_WAIT_TIME_GLOBAL,
4085 0 : ]
4086 0 : .into_iter()
4087 0 : .for_each(|h| {
4088 0 : Lazy::force(h);
4089 0 : });
4090 0 :
4091 0 : // Custom
4092 0 : Lazy::force(&BASEBACKUP_QUERY_TIME);
4093 0 : Lazy::force(&COMPUTE_COMMANDS_COUNTERS);
4094 0 : Lazy::force(&tokio_epoll_uring::THREAD_LOCAL_METRICS_STORAGE);
4095 0 :
4096 0 : tenant_throttling::preinitialize_global_metrics();
4097 0 : }
|