Line data Source code
1 : use std::collections::HashMap;
2 : use std::num::NonZeroUsize;
3 : use std::pin::Pin;
4 : use std::sync::atomic::AtomicU64;
5 : use std::sync::{Arc, Mutex};
6 : use std::task::{Context, Poll};
7 : use std::time::{Duration, Instant};
8 :
9 : use enum_map::EnumMap;
10 : use futures::Future;
11 : use metrics::{
12 : register_counter_vec, register_gauge_vec, register_histogram, register_histogram_vec,
13 : register_int_counter, register_int_counter_pair_vec, register_int_counter_vec,
14 : register_int_gauge, register_int_gauge_vec, register_uint_gauge, register_uint_gauge_vec,
15 : Counter, CounterVec, Gauge, GaugeVec, Histogram, HistogramVec, IntCounter, IntCounterPair,
16 : IntCounterPairVec, IntCounterVec, IntGauge, IntGaugeVec, UIntGauge, UIntGaugeVec,
17 : };
18 : use once_cell::sync::Lazy;
19 : use pageserver_api::config::{
20 : PageServicePipeliningConfig, PageServicePipeliningConfigPipelined,
21 : PageServiceProtocolPipelinedExecutionStrategy,
22 : };
23 : use pageserver_api::models::InMemoryLayerInfo;
24 : use pageserver_api::shard::TenantShardId;
25 : use pin_project_lite::pin_project;
26 : use postgres_backend::{is_expected_io_error, QueryError};
27 : use pq_proto::framed::ConnectionError;
28 :
29 : use strum::{EnumCount, IntoEnumIterator as _, VariantNames};
30 : use strum_macros::{IntoStaticStr, VariantNames};
31 : use utils::id::TimelineId;
32 :
33 : use crate::config::PageServerConf;
34 : use crate::context::{PageContentKind, RequestContext};
35 : use crate::task_mgr::TaskKind;
36 : use crate::tenant::layer_map::LayerMap;
37 : use crate::tenant::mgr::TenantSlot;
38 : use crate::tenant::storage_layer::{InMemoryLayer, PersistentLayerDesc};
39 : use crate::tenant::tasks::BackgroundLoopKind;
40 : use crate::tenant::throttle::ThrottleResult;
41 : use crate::tenant::Timeline;
42 :
43 : /// Prometheus histogram buckets (in seconds) for operations in the critical
44 : /// path. In other words, operations that directly affect the latency of user
45 : /// queries.
46 : ///
47 : /// The buckets capture the majority of latencies in the microsecond and
48 : /// millisecond range but also extend far enough up to distinguish "bad" from
49 : /// "really bad".
50 : const CRITICAL_OP_BUCKETS: &[f64] = &[
51 : 0.000_001, 0.000_010, 0.000_100, // 1 us, 10 us, 100 us
52 : 0.001_000, 0.010_000, 0.100_000, // 1 ms, 10 ms, 100 ms
53 : 1.0, 10.0, 100.0, // 1 s, 10 s, 100 s
54 : ];
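// Worked example (illustration, not part of the original file): Prometheus histogram buckets
// are cumulative, so an observation of 250 us increments the 0.001 (1 ms) bucket and every
// larger one, a 2.5 s observation first lands in the 10 s bucket, and anything above 100 s
// shows up only in the implicit +Inf bucket; that tail is what separates "bad" from "really bad".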
55 :
56 : // Metrics collected on operations on the storage repository.
57 : #[derive(Debug, VariantNames, IntoStaticStr)]
58 : #[strum(serialize_all = "kebab_case")]
59 : pub(crate) enum StorageTimeOperation {
60 : #[strum(serialize = "layer flush")]
61 : LayerFlush,
62 :
63 : #[strum(serialize = "layer flush delay")]
64 : LayerFlushDelay,
65 :
66 : #[strum(serialize = "compact")]
67 : Compact,
68 :
69 : #[strum(serialize = "create images")]
70 : CreateImages,
71 :
72 : #[strum(serialize = "logical size")]
73 : LogicalSize,
74 :
75 : #[strum(serialize = "imitate logical size")]
76 : ImitateLogicalSize,
77 :
78 : #[strum(serialize = "load layer map")]
79 : LoadLayerMap,
80 :
81 : #[strum(serialize = "gc")]
82 : Gc,
83 :
84 : #[strum(serialize = "find gc cutoffs")]
85 : FindGcCutoffs,
86 : }
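// Illustrative sketch (assumed usage, not from this file; `tenant_id`, `shard_id` and
// `timeline_id` are hypothetical &str label values): the strum serializations above become the
// "operation" label values of the per-timeline storage-time metrics defined below, e.g.
// ```
// let op: &'static str = StorageTimeOperation::LayerFlush.into(); // "layer flush"
// STORAGE_TIME_COUNT_PER_TIMELINE
//     .with_label_values(&[op, tenant_id, shard_id, timeline_id])
//     .inc();
// ```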
87 :
88 400 : pub(crate) static STORAGE_TIME_SUM_PER_TIMELINE: Lazy<CounterVec> = Lazy::new(|| {
89 400 : register_counter_vec!(
90 400 : "pageserver_storage_operations_seconds_sum",
91 400 : "Total time spent on storage operations with operation, tenant and timeline dimensions",
92 400 : &["operation", "tenant_id", "shard_id", "timeline_id"],
93 400 : )
94 400 : .expect("failed to define a metric")
95 400 : });
96 :
97 400 : pub(crate) static STORAGE_TIME_COUNT_PER_TIMELINE: Lazy<IntCounterVec> = Lazy::new(|| {
98 400 : register_int_counter_vec!(
99 400 : "pageserver_storage_operations_seconds_count",
100 400 : "Count of storage operations with operation, tenant and timeline dimensions",
101 400 : &["operation", "tenant_id", "shard_id", "timeline_id"],
102 400 : )
103 400 : .expect("failed to define a metric")
104 400 : });
105 :
106 : // Buckets for background operations like compaction, GC, size calculation
107 : const STORAGE_OP_BUCKETS: &[f64] = &[0.010, 0.100, 1.0, 10.0, 100.0, 1000.0];
108 :
109 400 : pub(crate) static STORAGE_TIME_GLOBAL: Lazy<HistogramVec> = Lazy::new(|| {
110 400 : register_histogram_vec!(
111 400 : "pageserver_storage_operations_seconds_global",
112 400 : "Time spent on storage operations",
113 400 : &["operation"],
114 400 : STORAGE_OP_BUCKETS.into(),
115 400 : )
116 400 : .expect("failed to define a metric")
117 400 : });
118 :
119 : /// Measures layers visited per read (i.e. read amplification).
120 : ///
121 : /// NB: for a batch, we count all visited layers towards each read. While the cost of layer visits
122 : /// are amortized across the batch, and some layers may not intersect with a given key, each visited
123 : /// layer contributes directly to the observed latency for every read in the batch, which is what we
124 : /// care about.
125 400 : pub(crate) static LAYERS_PER_READ: Lazy<HistogramVec> = Lazy::new(|| {
126 400 : register_histogram_vec!(
127 400 : "pageserver_layers_per_read",
128 400 : "Layers visited to serve a single read (read amplification). In a batch, all visited layers count towards every read.",
129 400 : &["tenant_id", "shard_id", "timeline_id"],
130 400 : // Low resolution to reduce cardinality.
131 400 : vec![1.0, 5.0, 10.0, 25.0, 50.0, 100.0],
132 400 : )
133 400 : .expect("failed to define a metric")
134 400 : });
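// Worked example (illustration only): a batch of 4 reads that visits 12 layers in total records
// the value 12 once per read, i.e. four observations of 12, because every read in the batch had
// to wait for all 12 layer visits even if only some of those layers intersected its key.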
135 :
136 392 : pub(crate) static LAYERS_PER_READ_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
137 392 : register_histogram!(
138 392 : "pageserver_layers_per_read_global",
139 392 : "Layers visited to serve a single read (read amplification). In a batch, all visited layers count towards every read.",
140 392 : vec![1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0],
141 392 : )
142 392 : .expect("failed to define a metric")
143 392 : });
144 :
145 392 : pub(crate) static DELTAS_PER_READ_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
146 392 : // We expect this to be low because of Postgres checkpoints. Let's see if that holds.
147 392 : register_histogram!(
148 392 : "pageserver_deltas_per_read_global",
149 392 : "Number of delta pages applied to image page per read",
150 392 : vec![0.0, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0],
151 392 : )
152 392 : .expect("failed to define a metric")
153 392 : });
154 :
155 0 : pub(crate) static CONCURRENT_INITDBS: Lazy<UIntGauge> = Lazy::new(|| {
156 0 : register_uint_gauge!(
157 0 : "pageserver_concurrent_initdb",
158 0 : "Number of initdb processes running"
159 0 : )
160 0 : .expect("failed to define a metric")
161 0 : });
162 :
163 0 : pub(crate) static INITDB_SEMAPHORE_ACQUISITION_TIME: Lazy<Histogram> = Lazy::new(|| {
164 0 : register_histogram!(
165 0 : "pageserver_initdb_semaphore_seconds_global",
166 0 : "Time spent getting a permit from the global initdb semaphore",
167 0 : STORAGE_OP_BUCKETS.into()
168 0 : )
169 0 : .expect("failed to define metric")
170 0 : });
171 :
172 0 : pub(crate) static INITDB_RUN_TIME: Lazy<Histogram> = Lazy::new(|| {
173 0 : register_histogram!(
174 0 : "pageserver_initdb_seconds_global",
175 0 : "Time spent performing initdb",
176 0 : STORAGE_OP_BUCKETS.into()
177 0 : )
178 0 : .expect("failed to define metric")
179 0 : });
180 :
181 : pub(crate) struct GetVectoredLatency {
182 : map: EnumMap<TaskKind, Option<Histogram>>,
183 : }
184 :
185 : #[allow(dead_code)]
186 : pub(crate) struct ScanLatency {
187 : map: EnumMap<TaskKind, Option<Histogram>>,
188 : }
189 :
190 : impl GetVectoredLatency {
191 : // Only these task types perform vectored gets. Filter all other tasks out to reduce total
192 : // cardinality of the metric.
193 : const TRACKED_TASK_KINDS: [TaskKind; 2] = [TaskKind::Compaction, TaskKind::PageRequestHandler];
194 :
195 39356 : pub(crate) fn for_task_kind(&self, task_kind: TaskKind) -> Option<&Histogram> {
196 39356 : self.map[task_kind].as_ref()
197 39356 : }
198 : }
199 :
200 : impl ScanLatency {
201 : // Only these task types perform scans. Filter all other tasks out to reduce total
202 : // cardinality of the metric.
203 : const TRACKED_TASK_KINDS: [TaskKind; 1] = [TaskKind::PageRequestHandler];
204 :
205 24 : pub(crate) fn for_task_kind(&self, task_kind: TaskKind) -> Option<&Histogram> {
206 24 : self.map[task_kind].as_ref()
207 24 : }
208 : }
209 :
210 : pub(crate) struct ScanLatencyOngoingRecording<'a> {
211 : parent: &'a Histogram,
212 : start: std::time::Instant,
213 : }
214 :
215 : impl<'a> ScanLatencyOngoingRecording<'a> {
216 0 : pub(crate) fn start_recording(parent: &'a Histogram) -> ScanLatencyOngoingRecording<'a> {
217 0 : let start = Instant::now();
218 0 : ScanLatencyOngoingRecording { parent, start }
219 0 : }
220 :
221 0 : pub(crate) fn observe(self) {
222 0 : let elapsed = self.start.elapsed();
223 0 : self.parent.observe(elapsed.as_secs_f64());
224 0 : }
225 : }
226 :
227 384 : pub(crate) static GET_VECTORED_LATENCY: Lazy<GetVectoredLatency> = Lazy::new(|| {
228 384 : let inner = register_histogram_vec!(
229 384 : "pageserver_get_vectored_seconds",
230 384 : "Time spent in get_vectored.",
231 384 : &["task_kind"],
232 384 : CRITICAL_OP_BUCKETS.into(),
233 384 : )
234 384 : .expect("failed to define a metric");
235 384 :
236 384 : GetVectoredLatency {
237 11904 : map: EnumMap::from_array(std::array::from_fn(|task_kind_idx| {
238 11904 : let task_kind = <TaskKind as enum_map::Enum>::from_usize(task_kind_idx);
239 11904 :
240 11904 : if GetVectoredLatency::TRACKED_TASK_KINDS.contains(&task_kind) {
241 768 : let task_kind = task_kind.into();
242 768 : Some(inner.with_label_values(&[task_kind]))
243 : } else {
244 11136 : None
245 : }
246 11904 : })),
247 384 : }
248 384 : });
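// Illustrative lookup (assumed caller, not from this file; `elapsed` is a hypothetical Duration):
// untracked task kinds map to None, so callers observe conditionally, roughly like so:
// ```
// if let Some(histogram) = GET_VECTORED_LATENCY.for_task_kind(ctx.task_kind()) {
//     histogram.observe(elapsed.as_secs_f64());
// }
// ```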
249 :
250 8 : pub(crate) static SCAN_LATENCY: Lazy<ScanLatency> = Lazy::new(|| {
251 8 : let inner = register_histogram_vec!(
252 8 : "pageserver_scan_seconds",
253 8 : "Time spent in scan.",
254 8 : &["task_kind"],
255 8 : CRITICAL_OP_BUCKETS.into(),
256 8 : )
257 8 : .expect("failed to define a metric");
258 8 :
259 8 : ScanLatency {
260 248 : map: EnumMap::from_array(std::array::from_fn(|task_kind_idx| {
261 248 : let task_kind = <TaskKind as enum_map::Enum>::from_usize(task_kind_idx);
262 248 :
263 248 : if ScanLatency::TRACKED_TASK_KINDS.contains(&task_kind) {
264 8 : let task_kind = task_kind.into();
265 8 : Some(inner.with_label_values(&[task_kind]))
266 : } else {
267 240 : None
268 : }
269 248 : })),
270 8 : }
271 8 : });
272 :
273 : pub(crate) struct PageCacheMetricsForTaskKind {
274 : pub read_accesses_immutable: IntCounter,
275 : pub read_hits_immutable: IntCounter,
276 : }
277 :
278 : pub(crate) struct PageCacheMetrics {
279 : map: EnumMap<TaskKind, EnumMap<PageContentKind, PageCacheMetricsForTaskKind>>,
280 : }
281 :
282 184 : static PAGE_CACHE_READ_HITS: Lazy<IntCounterVec> = Lazy::new(|| {
283 184 : register_int_counter_vec!(
284 184 : "pageserver_page_cache_read_hits_total",
285 184 : "Number of read accesses to the page cache that hit",
286 184 : &["task_kind", "key_kind", "content_kind", "hit_kind"]
287 184 : )
288 184 : .expect("failed to define a metric")
289 184 : });
290 :
291 184 : static PAGE_CACHE_READ_ACCESSES: Lazy<IntCounterVec> = Lazy::new(|| {
292 184 : register_int_counter_vec!(
293 184 : "pageserver_page_cache_read_accesses_total",
294 184 : "Number of read accesses to the page cache",
295 184 : &["task_kind", "key_kind", "content_kind"]
296 184 : )
297 184 : .expect("failed to define a metric")
298 184 : });
299 :
300 184 : pub(crate) static PAGE_CACHE: Lazy<PageCacheMetrics> = Lazy::new(|| PageCacheMetrics {
301 5704 : map: EnumMap::from_array(std::array::from_fn(|task_kind| {
302 5704 : let task_kind = <TaskKind as enum_map::Enum>::from_usize(task_kind);
303 5704 : let task_kind: &'static str = task_kind.into();
304 45632 : EnumMap::from_array(std::array::from_fn(|content_kind| {
305 45632 : let content_kind = <PageContentKind as enum_map::Enum>::from_usize(content_kind);
306 45632 : let content_kind: &'static str = content_kind.into();
307 45632 : PageCacheMetricsForTaskKind {
308 45632 : read_accesses_immutable: {
309 45632 : PAGE_CACHE_READ_ACCESSES
310 45632 : .get_metric_with_label_values(&[task_kind, "immutable", content_kind])
311 45632 : .unwrap()
312 45632 : },
313 45632 :
314 45632 : read_hits_immutable: {
315 45632 : PAGE_CACHE_READ_HITS
316 45632 : .get_metric_with_label_values(&[task_kind, "immutable", content_kind, "-"])
317 45632 : .unwrap()
318 45632 : },
319 45632 : }
320 45632 : }))
321 5704 : })),
322 184 : });
323 :
324 : impl PageCacheMetrics {
325 1944556 : pub(crate) fn for_ctx(&self, ctx: &RequestContext) -> &PageCacheMetricsForTaskKind {
326 1944556 : &self.map[ctx.task_kind()][ctx.page_content_kind()]
327 1944556 : }
328 : }
329 :
330 : pub(crate) struct PageCacheSizeMetrics {
331 : pub max_bytes: UIntGauge,
332 :
333 : pub current_bytes_immutable: UIntGauge,
334 : }
335 :
336 184 : static PAGE_CACHE_SIZE_CURRENT_BYTES: Lazy<UIntGaugeVec> = Lazy::new(|| {
337 184 : register_uint_gauge_vec!(
338 184 : "pageserver_page_cache_size_current_bytes",
339 184 : "Current size of the page cache in bytes, by key kind",
340 184 : &["key_kind"]
341 184 : )
342 184 : .expect("failed to define a metric")
343 184 : });
344 :
345 : pub(crate) static PAGE_CACHE_SIZE: Lazy<PageCacheSizeMetrics> =
346 184 : Lazy::new(|| PageCacheSizeMetrics {
347 184 : max_bytes: {
348 184 : register_uint_gauge!(
349 184 : "pageserver_page_cache_size_max_bytes",
350 184 : "Maximum size of the page cache in bytes"
351 184 : )
352 184 : .expect("failed to define a metric")
353 184 : },
354 184 : current_bytes_immutable: {
355 184 : PAGE_CACHE_SIZE_CURRENT_BYTES
356 184 : .get_metric_with_label_values(&["immutable"])
357 184 : .unwrap()
358 184 : },
359 184 : });
360 :
361 : pub(crate) mod page_cache_eviction_metrics {
362 : use std::num::NonZeroUsize;
363 :
364 : use metrics::{register_int_counter_vec, IntCounter, IntCounterVec};
365 : use once_cell::sync::Lazy;
366 :
367 : #[derive(Clone, Copy)]
368 : pub(crate) enum Outcome {
369 : FoundSlotUnused { iters: NonZeroUsize },
370 : FoundSlotEvicted { iters: NonZeroUsize },
371 : ItersExceeded { iters: NonZeroUsize },
372 : }
373 :
374 184 : static ITERS_TOTAL_VEC: Lazy<IntCounterVec> = Lazy::new(|| {
375 184 : register_int_counter_vec!(
376 184 : "pageserver_page_cache_find_victim_iters_total",
377 184 : "Counter for the number of iterations in the find_victim loop",
378 184 : &["outcome"],
379 184 : )
380 184 : .expect("failed to define a metric")
381 184 : });
382 :
383 184 : static CALLS_VEC: Lazy<IntCounterVec> = Lazy::new(|| {
384 184 : register_int_counter_vec!(
385 184 : "pageserver_page_cache_find_victim_calls",
386 184 : "Incremented at the end of each find_victim() call.\
387 184 : Filter by outcome to get e.g., eviction rate.",
388 184 : &["outcome"]
389 184 : )
390 184 : .unwrap()
391 184 : });
392 :
393 63413 : pub(crate) fn observe(outcome: Outcome) {
394 : macro_rules! dry {
395 : ($label:literal, $iters:expr) => {{
396 : static LABEL: &'static str = $label;
397 : static ITERS_TOTAL: Lazy<IntCounter> =
398 224 : Lazy::new(|| ITERS_TOTAL_VEC.with_label_values(&[LABEL]));
399 : static CALLS: Lazy<IntCounter> =
400 224 : Lazy::new(|| CALLS_VEC.with_label_values(&[LABEL]));
401 : ITERS_TOTAL.inc_by(($iters.get()) as u64);
402 : CALLS.inc();
403 : }};
404 : }
405 63413 : match outcome {
406 3272 : Outcome::FoundSlotUnused { iters } => dry!("found_empty", iters),
407 60141 : Outcome::FoundSlotEvicted { iters } => {
408 60141 : dry!("found_evicted", iters)
409 : }
410 0 : Outcome::ItersExceeded { iters } => {
411 0 : dry!("err_iters_exceeded", iters);
412 0 : super::page_cache_errors_inc(super::PageCacheErrorKind::EvictIterLimit);
413 0 : }
414 : }
415 63413 : }
416 : }
417 :
418 0 : static PAGE_CACHE_ERRORS: Lazy<IntCounterVec> = Lazy::new(|| {
419 0 : register_int_counter_vec!(
420 0 : "page_cache_errors_total",
421 0 : "Number of timeouts while acquiring a pinned slot in the page cache",
422 0 : &["error_kind"]
423 0 : )
424 0 : .expect("failed to define a metric")
425 0 : });
426 :
427 : #[derive(IntoStaticStr)]
428 : #[strum(serialize_all = "kebab_case")]
429 : pub(crate) enum PageCacheErrorKind {
430 : AcquirePinnedSlotTimeout,
431 : EvictIterLimit,
432 : }
433 :
434 0 : pub(crate) fn page_cache_errors_inc(error_kind: PageCacheErrorKind) {
435 0 : PAGE_CACHE_ERRORS
436 0 : .get_metric_with_label_values(&[error_kind.into()])
437 0 : .unwrap()
438 0 : .inc();
439 0 : }
440 :
441 40 : pub(crate) static WAIT_LSN_TIME: Lazy<Histogram> = Lazy::new(|| {
442 40 : register_histogram!(
443 40 : "pageserver_wait_lsn_seconds",
444 40 : "Time spent waiting for WAL to arrive",
445 40 : CRITICAL_OP_BUCKETS.into(),
446 40 : )
447 40 : .expect("failed to define a metric")
448 40 : });
449 :
450 400 : static FLUSH_WAIT_UPLOAD_TIME: Lazy<GaugeVec> = Lazy::new(|| {
451 400 : register_gauge_vec!(
452 400 : "pageserver_flush_wait_upload_seconds",
453 400 : "Time spent waiting for preceding uploads during layer flush",
454 400 : &["tenant_id", "shard_id", "timeline_id"]
455 400 : )
456 400 : .expect("failed to define a metric")
457 400 : });
458 :
459 400 : static LAST_RECORD_LSN: Lazy<IntGaugeVec> = Lazy::new(|| {
460 400 : register_int_gauge_vec!(
461 400 : "pageserver_last_record_lsn",
462 400 : "Last record LSN grouped by timeline",
463 400 : &["tenant_id", "shard_id", "timeline_id"]
464 400 : )
465 400 : .expect("failed to define a metric")
466 400 : });
467 :
468 400 : static DISK_CONSISTENT_LSN: Lazy<IntGaugeVec> = Lazy::new(|| {
469 400 : register_int_gauge_vec!(
470 400 : "pageserver_disk_consistent_lsn",
471 400 : "Disk consistent LSN grouped by timeline",
472 400 : &["tenant_id", "shard_id", "timeline_id"]
473 400 : )
474 400 : .expect("failed to define a metric")
475 400 : });
476 :
477 400 : pub(crate) static PROJECTED_REMOTE_CONSISTENT_LSN: Lazy<UIntGaugeVec> = Lazy::new(|| {
478 400 : register_uint_gauge_vec!(
479 400 : "pageserver_projected_remote_consistent_lsn",
480 400 : "Projected remote consistent LSN grouped by timeline",
481 400 : &["tenant_id", "shard_id", "timeline_id"]
482 400 : )
483 400 : .expect("failed to define a metric")
484 400 : });
485 :
486 400 : static PITR_HISTORY_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
487 400 : register_uint_gauge_vec!(
488 400 : "pageserver_pitr_history_size",
489 400 : "Data written since PITR cutoff on this timeline",
490 400 : &["tenant_id", "shard_id", "timeline_id"]
491 400 : )
492 400 : .expect("failed to define a metric")
493 400 : });
494 :
495 : #[derive(
496 240 : strum_macros::EnumIter,
497 0 : strum_macros::EnumString,
498 : strum_macros::Display,
499 : strum_macros::IntoStaticStr,
500 : )]
501 : #[strum(serialize_all = "kebab_case")]
502 : pub(crate) enum LayerKind {
503 : Delta,
504 : Image,
505 : }
506 :
507 : #[derive(
508 100 : strum_macros::EnumIter,
509 0 : strum_macros::EnumString,
510 : strum_macros::Display,
511 : strum_macros::IntoStaticStr,
512 : )]
513 : #[strum(serialize_all = "kebab_case")]
514 : pub(crate) enum LayerLevel {
515 : // We don't track the currently open ephemeral layer, since there's always exactly 1 and its
516 : // size changes. See `TIMELINE_EPHEMERAL_BYTES`.
517 : Frozen,
518 : L0,
519 : L1,
520 : }
521 :
522 392 : static TIMELINE_LAYER_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
523 392 : register_uint_gauge_vec!(
524 392 : "pageserver_layer_bytes",
525 392 : "Sum of frozen, L0, and L1 layer physical sizes in bytes (excluding the open ephemeral layer)",
526 392 : &["tenant_id", "shard_id", "timeline_id", "level", "kind"]
527 392 : )
528 392 : .expect("failed to define a metric")
529 392 : });
530 :
531 392 : static TIMELINE_LAYER_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
532 392 : register_uint_gauge_vec!(
533 392 : "pageserver_layer_count",
534 392 : "Number of frozen, L0, and L1 layers (excluding the open ephemeral layer)",
535 392 : &["tenant_id", "shard_id", "timeline_id", "level", "kind"]
536 392 : )
537 392 : .expect("failed to define a metric")
538 392 : });
539 :
540 400 : static TIMELINE_ARCHIVE_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
541 400 : register_uint_gauge_vec!(
542 400 : "pageserver_archive_size",
543 400 : "Timeline's logical size if it is considered eligible for archival (outside PITR window), else zero",
544 400 : &["tenant_id", "shard_id", "timeline_id"]
545 400 : )
546 400 : .expect("failed to define a metric")
547 400 : });
548 :
549 400 : static STANDBY_HORIZON: Lazy<IntGaugeVec> = Lazy::new(|| {
550 400 : register_int_gauge_vec!(
551 400 : "pageserver_standby_horizon",
552 400 : "Standby apply LSN for which GC is hold off, by timeline.",
553 400 : &["tenant_id", "shard_id", "timeline_id"]
554 400 : )
555 400 : .expect("failed to define a metric")
556 400 : });
557 :
558 400 : static RESIDENT_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
559 400 : register_uint_gauge_vec!(
560 400 : "pageserver_resident_physical_size",
561 400 : "The size of the layer files present in the pageserver's filesystem, for attached locations.",
562 400 : &["tenant_id", "shard_id", "timeline_id"]
563 400 : )
564 400 : .expect("failed to define a metric")
565 400 : });
566 :
567 400 : static VISIBLE_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
568 400 : register_uint_gauge_vec!(
569 400 : "pageserver_visible_physical_size",
570 400 : "The size of the layer files present in the pageserver's filesystem.",
571 400 : &["tenant_id", "shard_id", "timeline_id"]
572 400 : )
573 400 : .expect("failed to define a metric")
574 400 : });
575 :
576 392 : pub(crate) static RESIDENT_PHYSICAL_SIZE_GLOBAL: Lazy<UIntGauge> = Lazy::new(|| {
577 392 : register_uint_gauge!(
578 392 : "pageserver_resident_physical_size_global",
579 392 : "Like `pageserver_resident_physical_size`, but without tenant/timeline dimensions."
580 392 : )
581 392 : .expect("failed to define a metric")
582 392 : });
583 :
584 400 : static REMOTE_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
585 400 : register_uint_gauge_vec!(
586 400 : "pageserver_remote_physical_size",
587 400 : "The size of the layer files present in the remote storage that are listed in the remote index_part.json.",
588 400 : // Corollary: If any files are missing from the index part, they won't be included here.
589 400 : &["tenant_id", "shard_id", "timeline_id"]
590 400 : )
591 400 : .expect("failed to define a metric")
592 400 : });
593 :
594 400 : static REMOTE_PHYSICAL_SIZE_GLOBAL: Lazy<UIntGauge> = Lazy::new(|| {
595 400 : register_uint_gauge!(
596 400 : "pageserver_remote_physical_size_global",
597 400 : "Like `pageserver_remote_physical_size`, but without tenant/timeline dimensions."
598 400 : )
599 400 : .expect("failed to define a metric")
600 400 : });
601 :
602 8 : pub(crate) static REMOTE_ONDEMAND_DOWNLOADED_LAYERS: Lazy<IntCounter> = Lazy::new(|| {
603 8 : register_int_counter!(
604 8 : "pageserver_remote_ondemand_downloaded_layers_total",
605 8 : "Total on-demand downloaded layers"
606 8 : )
607 8 : .unwrap()
608 8 : });
609 :
610 8 : pub(crate) static REMOTE_ONDEMAND_DOWNLOADED_BYTES: Lazy<IntCounter> = Lazy::new(|| {
611 8 : register_int_counter!(
612 8 : "pageserver_remote_ondemand_downloaded_bytes_total",
613 8 : "Total bytes of layers on-demand downloaded",
614 8 : )
615 8 : .unwrap()
616 8 : });
617 :
618 400 : static CURRENT_LOGICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
619 400 : register_uint_gauge_vec!(
620 400 : "pageserver_current_logical_size",
621 400 : "Current logical size grouped by timeline",
622 400 : &["tenant_id", "shard_id", "timeline_id"]
623 400 : )
624 400 : .expect("failed to define current logical size metric")
625 400 : });
626 :
627 400 : static AUX_FILE_SIZE: Lazy<IntGaugeVec> = Lazy::new(|| {
628 400 : register_int_gauge_vec!(
629 400 : "pageserver_aux_file_estimated_size",
630 400 : "The size of all aux files for a timeline in aux file v2 store.",
631 400 : &["tenant_id", "shard_id", "timeline_id"]
632 400 : )
633 400 : .expect("failed to define a metric")
634 400 : });
635 :
636 400 : static VALID_LSN_LEASE_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
637 400 : register_uint_gauge_vec!(
638 400 : "pageserver_valid_lsn_lease_count",
639 400 : "The number of valid leases after refreshing gc info.",
640 400 : &["tenant_id", "shard_id", "timeline_id"],
641 400 : )
642 400 : .expect("failed to define a metric")
643 400 : });
644 :
645 0 : pub(crate) static CIRCUIT_BREAKERS_BROKEN: Lazy<IntCounter> = Lazy::new(|| {
646 0 : register_int_counter!(
647 0 : "pageserver_circuit_breaker_broken",
648 0 : "How many times a circuit breaker has broken"
649 0 : )
650 0 : .expect("failed to define a metric")
651 0 : });
652 :
653 0 : pub(crate) static CIRCUIT_BREAKERS_UNBROKEN: Lazy<IntCounter> = Lazy::new(|| {
654 0 : register_int_counter!(
655 0 : "pageserver_circuit_breaker_unbroken",
656 0 : "How many times a circuit breaker has been un-broken (recovered)"
657 0 : )
658 0 : .expect("failed to define a metric")
659 0 : });
660 :
661 384 : pub(crate) static COMPRESSION_IMAGE_INPUT_BYTES: Lazy<IntCounter> = Lazy::new(|| {
662 384 : register_int_counter!(
663 384 : "pageserver_compression_image_in_bytes_total",
664 384 : "Size of data written into image layers before compression"
665 384 : )
666 384 : .expect("failed to define a metric")
667 384 : });
668 :
669 384 : pub(crate) static COMPRESSION_IMAGE_INPUT_BYTES_CONSIDERED: Lazy<IntCounter> = Lazy::new(|| {
670 384 : register_int_counter!(
671 384 : "pageserver_compression_image_in_bytes_considered",
672 384 : "Size of potentially compressible data written into image layers before compression"
673 384 : )
674 384 : .expect("failed to define a metric")
675 384 : });
676 :
677 384 : pub(crate) static COMPRESSION_IMAGE_INPUT_BYTES_CHOSEN: Lazy<IntCounter> = Lazy::new(|| {
678 384 : register_int_counter!(
679 384 : "pageserver_compression_image_in_bytes_chosen",
680 384 : "Size of data whose compressed form was written into image layers"
681 384 : )
682 384 : .expect("failed to define a metric")
683 384 : });
684 :
685 384 : pub(crate) static COMPRESSION_IMAGE_OUTPUT_BYTES: Lazy<IntCounter> = Lazy::new(|| {
686 384 : register_int_counter!(
687 384 : "pageserver_compression_image_out_bytes_total",
688 384 : "Size of compressed image layer written"
689 384 : )
690 384 : .expect("failed to define a metric")
691 384 : });
692 :
693 20 : pub(crate) static RELSIZE_CACHE_ENTRIES: Lazy<UIntGauge> = Lazy::new(|| {
694 20 : register_uint_gauge!(
695 20 : "pageserver_relsize_cache_entries",
696 20 : "Number of entries in the relation size cache",
697 20 : )
698 20 : .expect("failed to define a metric")
699 20 : });
700 :
701 20 : pub(crate) static RELSIZE_CACHE_HITS: Lazy<IntCounter> = Lazy::new(|| {
702 20 : register_int_counter!("pageserver_relsize_cache_hits", "Relation size cache hits",)
703 20 : .expect("failed to define a metric")
704 20 : });
705 :
706 20 : pub(crate) static RELSIZE_CACHE_MISSES: Lazy<IntCounter> = Lazy::new(|| {
707 20 : register_int_counter!(
708 20 : "pageserver_relsize_cache_misses",
709 20 : "Relation size cache misses",
710 20 : )
711 20 : .expect("failed to define a metric")
712 20 : });
713 :
714 8 : pub(crate) static RELSIZE_CACHE_MISSES_OLD: Lazy<IntCounter> = Lazy::new(|| {
715 8 : register_int_counter!(
716 8 : "pageserver_relsize_cache_misses_old",
717 8 : "Relation size cache misses where the lookup LSN is older than the last relation update"
718 8 : )
719 8 : .expect("failed to define a metric")
720 8 : });
721 :
722 : pub(crate) mod initial_logical_size {
723 : use metrics::{register_int_counter, register_int_counter_vec, IntCounter, IntCounterVec};
724 : use once_cell::sync::Lazy;
725 :
726 : pub(crate) struct StartCalculation(IntCounterVec);
727 400 : pub(crate) static START_CALCULATION: Lazy<StartCalculation> = Lazy::new(|| {
728 400 : StartCalculation(
729 400 : register_int_counter_vec!(
730 400 : "pageserver_initial_logical_size_start_calculation",
731 400 : "Incremented each time we start an initial logical size calculation attempt. \
732 400 : The `circumstances` label provides some additional details.",
733 400 : &["attempt", "circumstances"]
734 400 : )
735 400 : .unwrap(),
736 400 : )
737 400 : });
738 :
739 : struct DropCalculation {
740 : first: IntCounter,
741 : retry: IntCounter,
742 : }
743 :
744 400 : static DROP_CALCULATION: Lazy<DropCalculation> = Lazy::new(|| {
745 400 : let vec = register_int_counter_vec!(
746 400 : "pageserver_initial_logical_size_drop_calculation",
747 400 : "Incremented each time we abort a started size calculation attmpt.",
748 400 : &["attempt"]
749 400 : )
750 400 : .unwrap();
751 400 : DropCalculation {
752 400 : first: vec.with_label_values(&["first"]),
753 400 : retry: vec.with_label_values(&["retry"]),
754 400 : }
755 400 : });
756 :
757 : pub(crate) struct Calculated {
758 : pub(crate) births: IntCounter,
759 : pub(crate) deaths: IntCounter,
760 : }
761 :
762 400 : pub(crate) static CALCULATED: Lazy<Calculated> = Lazy::new(|| Calculated {
763 400 : births: register_int_counter!(
764 400 : "pageserver_initial_logical_size_finish_calculation",
765 400 : "Incremented every time we finish calculation of initial logical size.\
766 400 : If everything is working well, this should happen at most once per Timeline object."
767 400 : )
768 400 : .unwrap(),
769 400 : deaths: register_int_counter!(
770 400 : "pageserver_initial_logical_size_drop_finished_calculation",
771 400 : "Incremented when we drop a finished initial logical size calculation result.\
772 400 : Mainly useful to turn pageserver_initial_logical_size_finish_calculation into a gauge."
773 400 : )
774 400 : .unwrap(),
775 400 : });
776 :
777 : pub(crate) struct OngoingCalculationGuard {
778 : inc_drop_calculation: Option<IntCounter>,
779 : }
780 :
781 : #[derive(strum_macros::IntoStaticStr)]
782 : pub(crate) enum StartCircumstances {
783 : EmptyInitial,
784 : SkippedConcurrencyLimiter,
785 : AfterBackgroundTasksRateLimit,
786 : }
787 :
788 : impl StartCalculation {
789 424 : pub(crate) fn first(&self, circumstances: StartCircumstances) -> OngoingCalculationGuard {
790 424 : let circumstances_label: &'static str = circumstances.into();
791 424 : self.0
792 424 : .with_label_values(&["first", circumstances_label])
793 424 : .inc();
794 424 : OngoingCalculationGuard {
795 424 : inc_drop_calculation: Some(DROP_CALCULATION.first.clone()),
796 424 : }
797 424 : }
798 0 : pub(crate) fn retry(&self, circumstances: StartCircumstances) -> OngoingCalculationGuard {
799 0 : let circumstances_label: &'static str = circumstances.into();
800 0 : self.0
801 0 : .with_label_values(&["retry", circumstances_label])
802 0 : .inc();
803 0 : OngoingCalculationGuard {
804 0 : inc_drop_calculation: Some(DROP_CALCULATION.retry.clone()),
805 0 : }
806 0 : }
807 : }
808 :
809 : impl Drop for OngoingCalculationGuard {
810 424 : fn drop(&mut self) {
811 424 : if let Some(counter) = self.inc_drop_calculation.take() {
812 0 : counter.inc();
813 424 : }
814 424 : }
815 : }
816 :
817 : impl OngoingCalculationGuard {
818 424 : pub(crate) fn calculation_result_saved(mut self) -> FinishedCalculationGuard {
819 424 : drop(self.inc_drop_calculation.take());
820 424 : CALCULATED.births.inc();
821 424 : FinishedCalculationGuard {
822 424 : inc_on_drop: CALCULATED.deaths.clone(),
823 424 : }
824 424 : }
825 : }
826 :
827 : pub(crate) struct FinishedCalculationGuard {
828 : inc_on_drop: IntCounter,
829 : }
830 :
831 : impl Drop for FinishedCalculationGuard {
832 12 : fn drop(&mut self) {
833 12 : self.inc_on_drop.inc();
834 12 : }
835 : }
836 :
837 : // context: https://github.com/neondatabase/neon/issues/5963
838 : pub(crate) static TIMELINES_WHERE_WALRECEIVER_GOT_APPROXIMATE_SIZE: Lazy<IntCounter> =
839 0 : Lazy::new(|| {
840 0 : register_int_counter!(
841 0 : "pageserver_initial_logical_size_timelines_where_walreceiver_got_approximate_size",
842 0 : "Counter for the following event: walreceiver calls\
843 0 : Timeline::get_current_logical_size() and it returns `Approximate` for the first time."
844 0 : )
845 0 : .unwrap()
846 0 : });
847 : }
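// Illustrative lifecycle of the guards above (assumed caller, not from this file):
// ```
// use initial_logical_size::{StartCircumstances, START_CALCULATION};
// let guard = START_CALCULATION.first(StartCircumstances::SkippedConcurrencyLimiter);
// // ... compute and persist the initial logical size ...
// let _finished = guard.calculation_result_saved(); // bumps `births`; its Drop later bumps `deaths`
// // Dropping `guard` without calling calculation_result_saved() would instead bump
// // pageserver_initial_logical_size_drop_calculation{attempt="first"}.
// ```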
848 :
849 0 : static DIRECTORY_ENTRIES_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
850 0 : register_uint_gauge_vec!(
851 0 : "pageserver_directory_entries_count",
852 0 : "Sum of the entries in pageserver-stored directory listings",
853 0 : &["tenant_id", "shard_id", "timeline_id"]
854 0 : )
855 0 : .expect("failed to define a metric")
856 0 : });
857 :
858 404 : pub(crate) static TENANT_STATE_METRIC: Lazy<UIntGaugeVec> = Lazy::new(|| {
859 404 : register_uint_gauge_vec!(
860 404 : "pageserver_tenant_states_count",
861 404 : "Count of tenants per state",
862 404 : &["state"]
863 404 : )
864 404 : .expect("Failed to register pageserver_tenant_states_count metric")
865 404 : });
866 :
867 : /// A set of broken tenants.
868 : ///
869 : /// These are expected to be so rare that a set is fine. Set as in a new timeseries for each broken
870 : /// tenant.
871 20 : pub(crate) static BROKEN_TENANTS_SET: Lazy<UIntGaugeVec> = Lazy::new(|| {
872 20 : register_uint_gauge_vec!(
873 20 : "pageserver_broken_tenants_count",
874 20 : "Set of broken tenants",
875 20 : &["tenant_id", "shard_id"]
876 20 : )
877 20 : .expect("Failed to register pageserver_tenant_states_count metric")
878 20 : });
879 :
880 12 : pub(crate) static TENANT_SYNTHETIC_SIZE_METRIC: Lazy<UIntGaugeVec> = Lazy::new(|| {
881 12 : register_uint_gauge_vec!(
882 12 : "pageserver_tenant_synthetic_cached_size_bytes",
883 12 : "Synthetic size of each tenant in bytes",
884 12 : &["tenant_id"]
885 12 : )
886 12 : .expect("Failed to register pageserver_tenant_synthetic_cached_size_bytes metric")
887 12 : });
888 :
889 0 : pub(crate) static EVICTION_ITERATION_DURATION: Lazy<HistogramVec> = Lazy::new(|| {
890 0 : register_histogram_vec!(
891 0 : "pageserver_eviction_iteration_duration_seconds_global",
892 0 : "Time spent on a single eviction iteration",
893 0 : &["period_secs", "threshold_secs"],
894 0 : STORAGE_OP_BUCKETS.into(),
895 0 : )
896 0 : .expect("failed to define a metric")
897 0 : });
898 :
899 400 : static EVICTIONS: Lazy<IntCounterVec> = Lazy::new(|| {
900 400 : register_int_counter_vec!(
901 400 : "pageserver_evictions",
902 400 : "Number of layers evicted from the pageserver",
903 400 : &["tenant_id", "shard_id", "timeline_id"]
904 400 : )
905 400 : .expect("failed to define a metric")
906 400 : });
907 :
908 400 : static EVICTIONS_WITH_LOW_RESIDENCE_DURATION: Lazy<IntCounterVec> = Lazy::new(|| {
909 400 : register_int_counter_vec!(
910 400 : "pageserver_evictions_with_low_residence_duration",
911 400 : "If a layer is evicted that was resident for less than `low_threshold`, it is counted to this counter. \
912 400 : Residence duration is determined using the `residence_duration_data_source`.",
913 400 : &["tenant_id", "shard_id", "timeline_id", "residence_duration_data_source", "low_threshold_secs"]
914 400 : )
915 400 : .expect("failed to define a metric")
916 400 : });
917 :
918 0 : pub(crate) static UNEXPECTED_ONDEMAND_DOWNLOADS: Lazy<IntCounter> = Lazy::new(|| {
919 0 : register_int_counter!(
920 0 : "pageserver_unexpected_ondemand_downloads_count",
921 0 : "Number of unexpected on-demand downloads. \
922 0 : We log more context for each increment, so we forgo any labels in this metric.",
923 0 : )
924 0 : .expect("failed to define a metric")
925 0 : });
926 :
927 : /// How long did we take to start up? Broken down by labels to describe
928 : /// different phases of startup.
929 0 : pub static STARTUP_DURATION: Lazy<GaugeVec> = Lazy::new(|| {
930 0 : register_gauge_vec!(
931 0 : "pageserver_startup_duration_seconds",
932 0 : "Time taken by phases of pageserver startup, in seconds",
933 0 : &["phase"]
934 0 : )
935 0 : .expect("Failed to register pageserver_startup_duration_seconds metric")
936 0 : });
937 :
938 0 : pub static STARTUP_IS_LOADING: Lazy<UIntGauge> = Lazy::new(|| {
939 0 : register_uint_gauge!(
940 0 : "pageserver_startup_is_loading",
941 0 : "1 while in initial startup load of tenants, 0 at other times"
942 0 : )
943 0 : .expect("Failed to register pageserver_startup_is_loading")
944 0 : });
945 :
946 392 : pub(crate) static TIMELINE_EPHEMERAL_BYTES: Lazy<UIntGauge> = Lazy::new(|| {
947 392 : register_uint_gauge!(
948 392 : "pageserver_timeline_ephemeral_bytes",
949 392 : "Total number of bytes in ephemeral layers, summed for all timelines. Approximate, lazily updated."
950 392 : )
951 392 : .expect("Failed to register metric")
952 392 : });
953 :
954 : /// Metrics related to the lifecycle of a [`crate::tenant::Tenant`] object: things
955 : /// like how long it took to load.
956 : ///
957 : /// Note that these are process-global metrics, _not_ per-tenant metrics. Per-tenant
958 : /// metrics are rather expensive, and usually fine grained stuff makes more sense
959 : /// at a timeline level than tenant level.
960 : pub(crate) struct TenantMetrics {
961 : /// How long did tenants take to go from construction to active state?
962 : pub(crate) activation: Histogram,
963 : pub(crate) preload: Histogram,
964 : pub(crate) attach: Histogram,
965 :
966 : /// How many tenants are included in the initial startup of the pageserver?
967 : pub(crate) startup_scheduled: IntCounter,
968 : pub(crate) startup_complete: IntCounter,
969 : }
970 :
971 0 : pub(crate) static TENANT: Lazy<TenantMetrics> = Lazy::new(|| {
972 0 : TenantMetrics {
973 0 : activation: register_histogram!(
974 0 : "pageserver_tenant_activation_seconds",
975 0 : "Time taken by tenants to activate, in seconds",
976 0 : CRITICAL_OP_BUCKETS.into()
977 0 : )
978 0 : .expect("Failed to register metric"),
979 0 : preload: register_histogram!(
980 0 : "pageserver_tenant_preload_seconds",
981 0 : "Time taken by tenants to load remote metadata on startup/attach, in seconds",
982 0 : CRITICAL_OP_BUCKETS.into()
983 0 : )
984 0 : .expect("Failed to register metric"),
985 0 : attach: register_histogram!(
986 0 : "pageserver_tenant_attach_seconds",
987 0 : "Time taken by tenants to intialize, after remote metadata is already loaded",
988 0 : CRITICAL_OP_BUCKETS.into()
989 0 : )
990 0 : .expect("Failed to register metric"),
991 0 : startup_scheduled: register_int_counter!(
992 0 : "pageserver_tenant_startup_scheduled",
993 0 : "Number of tenants included in pageserver startup (doesn't count tenants attached later)"
994 0 : ).expect("Failed to register metric"),
995 0 : startup_complete: register_int_counter!(
996 0 : "pageserver_tenant_startup_complete",
997 0 : "Number of tenants that have completed warm-up, or activated on-demand during initial startup: \
998 0 : should eventually reach `pageserver_tenant_startup_scheduled_total`. Does not include broken \
999 0 : tenants: such cases will lead to this metric never reaching the scheduled count."
1000 0 : ).expect("Failed to register metric"),
1001 0 : }
1002 0 : });
1003 :
1004 : /// Each `Timeline`'s [`EVICTIONS_WITH_LOW_RESIDENCE_DURATION`] metric.
1005 : #[derive(Debug)]
1006 : pub(crate) struct EvictionsWithLowResidenceDuration {
1007 : data_source: &'static str,
1008 : threshold: Duration,
1009 : counter: Option<IntCounter>,
1010 : }
1011 :
1012 : pub(crate) struct EvictionsWithLowResidenceDurationBuilder {
1013 : data_source: &'static str,
1014 : threshold: Duration,
1015 : }
1016 :
1017 : impl EvictionsWithLowResidenceDurationBuilder {
1018 892 : pub fn new(data_source: &'static str, threshold: Duration) -> Self {
1019 892 : Self {
1020 892 : data_source,
1021 892 : threshold,
1022 892 : }
1023 892 : }
1024 :
1025 892 : fn build(
1026 892 : &self,
1027 892 : tenant_id: &str,
1028 892 : shard_id: &str,
1029 892 : timeline_id: &str,
1030 892 : ) -> EvictionsWithLowResidenceDuration {
1031 892 : let counter = EVICTIONS_WITH_LOW_RESIDENCE_DURATION
1032 892 : .get_metric_with_label_values(&[
1033 892 : tenant_id,
1034 892 : shard_id,
1035 892 : timeline_id,
1036 892 : self.data_source,
1037 892 : &EvictionsWithLowResidenceDuration::threshold_label_value(self.threshold),
1038 892 : ])
1039 892 : .unwrap();
1040 892 : EvictionsWithLowResidenceDuration {
1041 892 : data_source: self.data_source,
1042 892 : threshold: self.threshold,
1043 892 : counter: Some(counter),
1044 892 : }
1045 892 : }
1046 : }
1047 :
1048 : impl EvictionsWithLowResidenceDuration {
1049 912 : fn threshold_label_value(threshold: Duration) -> String {
1050 912 : format!("{}", threshold.as_secs())
1051 912 : }
1052 :
1053 8 : pub fn observe(&self, observed_value: Duration) {
1054 8 : if observed_value < self.threshold {
1055 8 : self.counter
1056 8 : .as_ref()
1057 8 : .expect("nobody calls this function after `remove_from_vec`")
1058 8 : .inc();
1059 8 : }
1060 8 : }
1061 :
1062 0 : pub fn change_threshold(
1063 0 : &mut self,
1064 0 : tenant_id: &str,
1065 0 : shard_id: &str,
1066 0 : timeline_id: &str,
1067 0 : new_threshold: Duration,
1068 0 : ) {
1069 0 : if new_threshold == self.threshold {
1070 0 : return;
1071 0 : }
1072 0 : let mut with_new = EvictionsWithLowResidenceDurationBuilder::new(
1073 0 : self.data_source,
1074 0 : new_threshold,
1075 0 : )
1076 0 : .build(tenant_id, shard_id, timeline_id);
1077 0 : std::mem::swap(self, &mut with_new);
1078 0 : with_new.remove(tenant_id, shard_id, timeline_id);
1079 0 : }
1080 :
1081 : // This could be a `Drop` impl, but we need the `tenant_id` and `timeline_id`.
1082 20 : fn remove(&mut self, tenant_id: &str, shard_id: &str, timeline_id: &str) {
1083 20 : let Some(_counter) = self.counter.take() else {
1084 0 : return;
1085 : };
1086 :
1087 20 : let threshold = Self::threshold_label_value(self.threshold);
1088 20 :
1089 20 : let removed = EVICTIONS_WITH_LOW_RESIDENCE_DURATION.remove_label_values(&[
1090 20 : tenant_id,
1091 20 : shard_id,
1092 20 : timeline_id,
1093 20 : self.data_source,
1094 20 : &threshold,
1095 20 : ]);
1096 20 :
1097 20 : match removed {
1098 0 : Err(e) => {
1099 0 : // this has been hit in staging as
1100 0 : // <https://neondatabase.sentry.io/issues/4142396994/>, but we don't know how.
1101 0 : // because we can be in the drop path already, don't risk:
1102 0 : // - "double-panic => illegal instruction" or
1103 0 : // - future "drop panick => abort"
1104 0 : //
1105 0 : // so just nag: (the error has the labels)
1106 0 : tracing::warn!("failed to remove EvictionsWithLowResidenceDuration, it was already removed? {e:#?}");
1107 : }
1108 : Ok(()) => {
1109 : // to help identify cases where we double-remove the same values, let's log all
1110 : // deletions?
1111 20 : tracing::info!("removed EvictionsWithLowResidenceDuration with {tenant_id}, {timeline_id}, {}, {threshold}", self.data_source);
1112 : }
1113 : }
1114 20 : }
1115 : }
1116 :
1117 : // Metrics collected on disk IO operations
1118 : //
1119 : // Roughly logarithmic scale.
1120 : const STORAGE_IO_TIME_BUCKETS: &[f64] = &[
1121 : 0.000030, // 30 usec
1122 : 0.001000, // 1000 usec
1123 : 0.030, // 30 ms
1124 : 1.000, // 1000 ms
1125 : 30.000, // 30000 ms
1126 : ];
1127 :
1128 : /// VirtualFile fs operation variants.
1129 : ///
1130 : /// Operations:
1131 : /// - open ([`std::fs::OpenOptions::open`])
1132 : /// - close (dropping [`crate::virtual_file::VirtualFile`])
1133 : /// - close-by-replace (close by replacement algorithm)
1134 : /// - read (`read_at`)
1135 : /// - write (`write_at`)
1136 : /// - seek (modify internal position or file length query)
1137 : /// - fsync ([`std::fs::File::sync_all`])
1138 : /// - metadata ([`std::fs::File::metadata`])
1139 : #[derive(
1140 0 : Debug, Clone, Copy, strum_macros::EnumCount, strum_macros::EnumIter, strum_macros::FromRepr,
1141 : )]
1142 : pub(crate) enum StorageIoOperation {
1143 : Open,
1144 : OpenAfterReplace,
1145 : Close,
1146 : CloseByReplace,
1147 : Read,
1148 : Write,
1149 : Seek,
1150 : Fsync,
1151 : Metadata,
1152 : }
1153 :
1154 : impl StorageIoOperation {
1155 4140 : pub fn as_str(&self) -> &'static str {
1156 4140 : match self {
1157 460 : StorageIoOperation::Open => "open",
1158 460 : StorageIoOperation::OpenAfterReplace => "open-after-replace",
1159 460 : StorageIoOperation::Close => "close",
1160 460 : StorageIoOperation::CloseByReplace => "close-by-replace",
1161 460 : StorageIoOperation::Read => "read",
1162 460 : StorageIoOperation::Write => "write",
1163 460 : StorageIoOperation::Seek => "seek",
1164 460 : StorageIoOperation::Fsync => "fsync",
1165 460 : StorageIoOperation::Metadata => "metadata",
1166 : }
1167 4140 : }
1168 : }
1169 :
1170 : /// Tracks time taken by fs operations near VirtualFile.
1171 : #[derive(Debug)]
1172 : pub(crate) struct StorageIoTime {
1173 : metrics: [Histogram; StorageIoOperation::COUNT],
1174 : }
1175 :
1176 : impl StorageIoTime {
1177 460 : fn new() -> Self {
1178 460 : let storage_io_histogram_vec = register_histogram_vec!(
1179 460 : "pageserver_io_operations_seconds",
1180 460 : "Time spent in IO operations",
1181 460 : &["operation"],
1182 460 : STORAGE_IO_TIME_BUCKETS.into()
1183 460 : )
1184 460 : .expect("failed to define a metric");
1185 4140 : let metrics = std::array::from_fn(|i| {
1186 4140 : let op = StorageIoOperation::from_repr(i).unwrap();
1187 4140 : storage_io_histogram_vec
1188 4140 : .get_metric_with_label_values(&[op.as_str()])
1189 4140 : .unwrap()
1190 4140 : });
1191 460 : Self { metrics }
1192 460 : }
1193 :
1194 4031111 : pub(crate) fn get(&self, op: StorageIoOperation) -> &Histogram {
1195 4031111 : &self.metrics[op as usize]
1196 4031111 : }
1197 : }
1198 :
1199 : pub(crate) static STORAGE_IO_TIME_METRIC: Lazy<StorageIoTime> = Lazy::new(StorageIoTime::new);
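// Illustrative usage (assumed caller, not from this file): time an fs operation and record it
// under the matching label, roughly like so:
// ```
// let started = Instant::now();
// // ... perform the read_at() call ...
// STORAGE_IO_TIME_METRIC
//     .get(StorageIoOperation::Read)
//     .observe(started.elapsed().as_secs_f64());
// ```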
1200 :
1201 : const STORAGE_IO_SIZE_OPERATIONS: &[&str] = &["read", "write"];
1202 :
1203 : // Needed for the https://neonprod.grafana.net/d/5uK9tHL4k/picking-tenant-for-relocation?orgId=1
1204 452 : pub(crate) static STORAGE_IO_SIZE: Lazy<IntGaugeVec> = Lazy::new(|| {
1205 452 : register_int_gauge_vec!(
1206 452 : "pageserver_io_operations_bytes_total",
1207 452 : "Total amount of bytes read/written in IO operations",
1208 452 : &["operation", "tenant_id", "shard_id", "timeline_id"]
1209 452 : )
1210 452 : .expect("failed to define a metric")
1211 452 : });
1212 :
1213 : #[cfg(not(test))]
1214 : pub(crate) mod virtual_file_descriptor_cache {
1215 : use super::*;
1216 :
1217 0 : pub(crate) static SIZE_MAX: Lazy<UIntGauge> = Lazy::new(|| {
1218 0 : register_uint_gauge!(
1219 0 : "pageserver_virtual_file_descriptor_cache_size_max",
1220 0 : "Maximum number of open file descriptors in the cache."
1221 0 : )
1222 0 : .unwrap()
1223 0 : });
1224 :
1225 : // SIZE_CURRENT: derive it like so:
1226 : // ```
1227 : // sum(pageserver_io_operations_seconds_count{operation=~"^(open|open-after-replace)$"})
1228 : // -ignoring(operation)
1229 : // sum(pageserver_io_operations_seconds_count{operation=~"^(close|close-by-replace)$"})
1230 : // ```
1231 : }
1232 :
1233 : #[cfg(not(test))]
1234 : pub(crate) mod virtual_file_io_engine {
1235 : use super::*;
1236 :
1237 0 : pub(crate) static KIND: Lazy<UIntGaugeVec> = Lazy::new(|| {
1238 0 : register_uint_gauge_vec!(
1239 0 : "pageserver_virtual_file_io_engine_kind",
1240 0 : "The configured io engine for VirtualFile",
1241 0 : &["kind"],
1242 0 : )
1243 0 : .unwrap()
1244 0 : });
1245 : }
1246 :
1247 : pub(crate) struct SmgrOpTimer(Option<SmgrOpTimerInner>);
1248 : pub(crate) struct SmgrOpTimerInner {
1249 : global_execution_latency_histo: Histogram,
1250 : per_timeline_execution_latency_histo: Option<Histogram>,
1251 :
1252 : global_batch_wait_time: Histogram,
1253 : per_timeline_batch_wait_time: Histogram,
1254 :
1255 : global_flush_in_progress_micros: IntCounter,
1256 : per_timeline_flush_in_progress_micros: IntCounter,
1257 :
1258 : throttling: Arc<tenant_throttling::Pagestream>,
1259 :
1260 : timings: SmgrOpTimerState,
1261 : }
1262 :
1263 : /// The stages of request processing are represented by the enum variants.
1264 : /// Used as part of [`SmgrOpTimerInner::timings`].
1265 : ///
1266 : /// Request processing calls into the `SmgrOpTimer::observe_*` methods at the
1267 : /// transition points.
1268 : /// These methods bump relevant counters and then update [`SmgrOpTimerInner::timings`]
1269 : /// to the next state.
1270 : ///
1271 : /// Each request goes through every stage, in all configurations.
1272 : ///
1273 : #[derive(Debug)]
1274 : enum SmgrOpTimerState {
1275 : Received {
1276 : // In the future, we may want to track the full time the request spent
1277 : // inside pageserver process (time spent in kernel buffers can't be tracked).
1278 : // `received_at` would be used for that.
1279 : #[allow(dead_code)]
1280 : received_at: Instant,
1281 : },
1282 : Throttling {
1283 : throttle_started_at: Instant,
1284 : },
1285 : Batching {
1286 : throttle_done_at: Instant,
1287 : },
1288 : Executing {
1289 : execution_started_at: Instant,
1290 : },
1291 : Flushing,
1292 : // NB: when adding observation points, remember to update the Drop impl.
1293 : }
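// Illustrative call sequence (assumed caller, not from this file): a request handler drives the
// timer through the states above in order, roughly like so:
// ```
// let mut timer: SmgrOpTimer = /* created when the request is received */;
// timer.observe_throttle_start(Instant::now());
// timer.observe_throttle_done(ThrottleResult::NotThrottled { end: Instant::now() });
// timer.observe_execution_start(Instant::now());
// if let Some(flush) = timer.observe_execution_end_flush_start(Instant::now()) {
//     // flush.measure(async { /* write the response to the socket */ }).await;
// }
// ```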
1294 :
1295 : // NB: when adding observation points, remember to update the Drop impl.
1296 : impl SmgrOpTimer {
1297 : /// See [`SmgrOpTimerState`] for more context.
1298 0 : pub(crate) fn observe_throttle_start(&mut self, at: Instant) {
1299 0 : let Some(inner) = self.0.as_mut() else {
1300 0 : return;
1301 : };
1302 0 : let SmgrOpTimerState::Received { received_at: _ } = &mut inner.timings else {
1303 0 : return;
1304 : };
1305 0 : inner.throttling.count_accounted_start.inc();
1306 0 : inner.timings = SmgrOpTimerState::Throttling {
1307 0 : throttle_started_at: at,
1308 0 : };
1309 0 : }
1310 :
1311 : /// See [`SmgrOpTimerState`] for more context.
1312 0 : pub(crate) fn observe_throttle_done(&mut self, throttle: ThrottleResult) {
1313 0 : let Some(inner) = self.0.as_mut() else {
1314 0 : return;
1315 : };
1316 : let SmgrOpTimerState::Throttling {
1317 0 : throttle_started_at,
1318 0 : } = &inner.timings
1319 : else {
1320 0 : return;
1321 : };
1322 0 : inner.throttling.count_accounted_finish.inc();
1323 0 : match throttle {
1324 0 : ThrottleResult::NotThrottled { end } => {
1325 0 : inner.timings = SmgrOpTimerState::Batching {
1326 0 : throttle_done_at: end,
1327 0 : };
1328 0 : }
1329 0 : ThrottleResult::Throttled { end } => {
1330 0 : // update metrics
1331 0 : inner.throttling.count_throttled.inc();
1332 0 : inner
1333 0 : .throttling
1334 0 : .wait_time
1335 0 : .inc_by((end - *throttle_started_at).as_micros().try_into().unwrap());
1336 0 : // state transition
1337 0 : inner.timings = SmgrOpTimerState::Batching {
1338 0 : throttle_done_at: end,
1339 0 : };
1340 0 : }
1341 : }
1342 0 : }
1343 :
1344 : /// See [`SmgrOpTimerState`] for more context.
1345 0 : pub(crate) fn observe_execution_start(&mut self, at: Instant) {
1346 0 : let Some(inner) = self.0.as_mut() else {
1347 0 : return;
1348 : };
1349 0 : let SmgrOpTimerState::Batching { throttle_done_at } = &inner.timings else {
1350 0 : return;
1351 : };
1352 : // update metrics
1353 0 : let batch = at - *throttle_done_at;
1354 0 : inner.global_batch_wait_time.observe(batch.as_secs_f64());
1355 0 : inner
1356 0 : .per_timeline_batch_wait_time
1357 0 : .observe(batch.as_secs_f64());
1358 0 : // state transition
1359 0 : inner.timings = SmgrOpTimerState::Executing {
1360 0 : execution_started_at: at,
1361 0 : }
1362 0 : }
1363 :
1364 : /// For all but the first caller, this is a no-op.
1365 : /// The first callers receives Some, subsequent ones None.
1366 : ///
1367 : /// See [`SmgrOpTimerState`] for more context.
1368 0 : pub(crate) fn observe_execution_end_flush_start(
1369 0 : &mut self,
1370 0 : at: Instant,
1371 0 : ) -> Option<SmgrOpFlushInProgress> {
1372 : // NB: unlike the other observe_* methods, this one take()s.
1373 : #[allow(clippy::question_mark)] // maintain similar code pattern.
1374 0 : let Some(mut inner) = self.0.take() else {
1375 0 : return None;
1376 : };
1377 : let SmgrOpTimerState::Executing {
1378 0 : execution_started_at,
1379 0 : } = &inner.timings
1380 : else {
1381 0 : return None;
1382 : };
1383 : // update metrics
1384 0 : let execution = at - *execution_started_at;
1385 0 : inner
1386 0 : .global_execution_latency_histo
1387 0 : .observe(execution.as_secs_f64());
1388 0 : if let Some(per_timeline_execution_latency_histo) =
1389 0 : &inner.per_timeline_execution_latency_histo
1390 0 : {
1391 0 : per_timeline_execution_latency_histo.observe(execution.as_secs_f64());
1392 0 : }
1393 :
1394 : // state transition
1395 0 : inner.timings = SmgrOpTimerState::Flushing;
1396 0 :
1397 0 : // return the flush in progress object which
1398 0 : // will do the remaining metrics updates
1399 0 : let SmgrOpTimerInner {
1400 0 : global_flush_in_progress_micros,
1401 0 : per_timeline_flush_in_progress_micros,
1402 0 : ..
1403 0 : } = inner;
1404 0 : Some(SmgrOpFlushInProgress {
1405 0 : flush_started_at: at,
1406 0 : global_micros: global_flush_in_progress_micros,
1407 0 : per_timeline_micros: per_timeline_flush_in_progress_micros,
1408 0 : })
1409 0 : }
1410 : }
1411 :
1412 : /// The last stage of request processing is serializing and flushing the request
1413 : /// into the TCP connection. We want to make slow flushes observable
1414 : /// _while they are occurring_, so this struct provides a wrapper method [`Self::measure`]
1415 : /// to periodically bump the metric.
1416 : ///
1417 : /// If in the future we decide that we're not interested in live updates, we can
1418 : /// add another `observe_*` method to [`SmgrOpTimer`], follow the existing pattern there,
1419 : /// and remove this struct from the code base.
1420 : pub(crate) struct SmgrOpFlushInProgress {
1421 : flush_started_at: Instant,
1422 : global_micros: IntCounter,
1423 : per_timeline_micros: IntCounter,
1424 : }
1425 :
1426 : impl Drop for SmgrOpTimer {
1427 0 : fn drop(&mut self) {
1428 0 : // In case of early drop, update any of the remaining metrics with
1429 0 : // observations so that (started,finished) counter pairs balance out
1430 0 : // and all counters on the latency path have the same number of
1431 0 : // observations.
1432 0 : // It's technically lying and it would be better if each metric had
1433 0 : // a separate label or similar for cancelled requests.
1434 0 : // But we don't have that right now and counter pairs balancing
1435 0 : // out is useful when using the metrics in panels and whatnot.
1436 0 : let now = Instant::now();
1437 0 : self.observe_throttle_start(now);
1438 0 : self.observe_throttle_done(ThrottleResult::NotThrottled { end: now });
1439 0 : self.observe_execution_start(now);
1440 0 : self.observe_execution_end_flush_start(now);
1441 0 : }
1442 : }
1443 :
1444 : impl SmgrOpFlushInProgress {
1445 0 : pub(crate) async fn measure<Fut, O>(mut self, mut fut: Fut) -> O
1446 0 : where
1447 0 : Fut: std::future::Future<Output = O>,
1448 0 : {
1449 0 : let mut fut = std::pin::pin!(fut);
1450 0 :
1451 0 : // Whenever observe_guard gets called, or dropped,
1452 0 : // it adds the time elapsed since its last call to metrics.
1453 0 : // Last call is tracked in `now`.
1454 0 : let mut observe_guard = scopeguard::guard(
1455 0 : || {
1456 0 : let now = Instant::now();
1457 0 : let elapsed = now - self.flush_started_at;
1458 0 : self.global_micros
1459 0 : .inc_by(u64::try_from(elapsed.as_micros()).unwrap());
1460 0 : self.per_timeline_micros
1461 0 : .inc_by(u64::try_from(elapsed.as_micros()).unwrap());
1462 0 : self.flush_started_at = now;
1463 0 : },
1464 0 : |mut observe| {
1465 0 : observe();
1466 0 : },
1467 0 : );
1468 :
1469 : loop {
1470 0 : match tokio::time::timeout(Duration::from_secs(10), &mut fut).await {
1471 0 : Ok(v) => return v,
1472 0 : Err(_timeout) => {
1473 0 : (*observe_guard)();
1474 0 : }
1475 : }
1476 : }
1477 0 : }
1478 : }
1479 :
1480 : #[derive(
1481 : Debug,
1482 : Clone,
1483 : Copy,
1484 : IntoStaticStr,
1485 : strum_macros::EnumCount,
1486 0 : strum_macros::EnumIter,
1487 : strum_macros::FromRepr,
1488 : enum_map::Enum,
1489 : )]
1490 : #[strum(serialize_all = "snake_case")]
1491 : pub enum SmgrQueryType {
1492 : GetRelExists,
1493 : GetRelSize,
1494 : GetPageAtLsn,
1495 : GetDbSize,
1496 : GetSlruSegment,
1497 : #[cfg(feature = "testing")]
1498 : Test,
1499 : }
1500 :
1501 : pub(crate) struct SmgrQueryTimePerTimeline {
1502 : global_started: [IntCounter; SmgrQueryType::COUNT],
1503 : global_latency: [Histogram; SmgrQueryType::COUNT],
1504 : per_timeline_getpage_started: IntCounter,
1505 : per_timeline_getpage_latency: Histogram,
1506 : global_batch_size: Histogram,
1507 : per_timeline_batch_size: Histogram,
1508 : global_flush_in_progress_micros: IntCounter,
1509 : per_timeline_flush_in_progress_micros: IntCounter,
1510 : global_batch_wait_time: Histogram,
1511 : per_timeline_batch_wait_time: Histogram,
1512 : throttling: Arc<tenant_throttling::Pagestream>,
1513 : }
1514 :
1515 400 : static SMGR_QUERY_STARTED_GLOBAL: Lazy<IntCounterVec> = Lazy::new(|| {
1516 400 : register_int_counter_vec!(
1517 400 : // it's a counter, but, name is prepared to extend it to a histogram of queue depth
1518 400 : "pageserver_smgr_query_started_global_count",
1519 400 : "Number of smgr queries started, aggregated by query type.",
1520 400 : &["smgr_query_type"],
1521 400 : )
1522 400 : .expect("failed to define a metric")
1523 400 : });
1524 :
1525 400 : static SMGR_QUERY_STARTED_PER_TENANT_TIMELINE: Lazy<IntCounterVec> = Lazy::new(|| {
1526 400 : register_int_counter_vec!(
1527 400 : // it's a counter, but the name is prepared for extending it to a histogram of queue depth
1528 400 : "pageserver_smgr_query_started_count",
1529 400 : "Number of smgr queries started, aggregated by query type and tenant/timeline.",
1530 400 : &["smgr_query_type", "tenant_id", "shard_id", "timeline_id"],
1531 400 : )
1532 400 : .expect("failed to define a metric")
1533 400 : });
1534 :
1535 : // Alias so all histograms recording per-timeline smgr timings use the same buckets.
1536 : static SMGR_QUERY_TIME_PER_TENANT_TIMELINE_BUCKETS: &[f64] = CRITICAL_OP_BUCKETS;
1537 :
1538 400 : static SMGR_QUERY_TIME_PER_TENANT_TIMELINE: Lazy<HistogramVec> = Lazy::new(|| {
1539 400 : register_histogram_vec!(
1540 400 : "pageserver_smgr_query_seconds",
1541 400 : "Time spent _executing_ smgr query handling, excluding batch and throttle delays.",
1542 400 : &["smgr_query_type", "tenant_id", "shard_id", "timeline_id"],
1543 400 : SMGR_QUERY_TIME_PER_TENANT_TIMELINE_BUCKETS.into(),
1544 400 : )
1545 400 : .expect("failed to define a metric")
1546 400 : });
1547 :
1548 400 : static SMGR_QUERY_TIME_GLOBAL_BUCKETS: Lazy<Vec<f64>> = Lazy::new(|| {
1549 400 : [
1550 400 : 1,
1551 400 : 10,
1552 400 : 20,
1553 400 : 40,
1554 400 : 60,
1555 400 : 80,
1556 400 : 100,
1557 400 : 200,
1558 400 : 300,
1559 400 : 400,
1560 400 : 500,
1561 400 : 600,
1562 400 : 700,
1563 400 : 800,
1564 400 : 900,
1565 400 : 1_000, // 1ms
1566 400 : 2_000,
1567 400 : 4_000,
1568 400 : 6_000,
1569 400 : 8_000,
1570 400 : 10_000, // 10ms
1571 400 : 20_000,
1572 400 : 40_000,
1573 400 : 60_000,
1574 400 : 80_000,
1575 400 : 100_000,
1576 400 : 200_000,
1577 400 : 400_000,
1578 400 : 600_000,
1579 400 : 800_000,
1580 400 : 1_000_000, // 1s
1581 400 : 2_000_000,
1582 400 : 4_000_000,
1583 400 : 6_000_000,
1584 400 : 8_000_000,
1585 400 : 10_000_000, // 10s
1586 400 : 20_000_000,
1587 400 : 50_000_000,
1588 400 : 100_000_000,
1589 400 : 200_000_000,
1590 400 : 1_000_000_000, // 1000s
1591 400 : ]
1592 400 : .into_iter()
1593 400 : .map(Duration::from_micros)
1594 16400 : .map(|d| d.as_secs_f64())
1595 400 : .collect()
1596 400 : });
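// The bucket bounds above are written in microseconds for readability and converted to
// seconds for Prometheus: e.g. the `1_000` entry becomes Duration::from_micros(1_000)
// = 1ms = 0.001s, and `10_000_000` becomes 10s.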
1597 :
1598 400 : static SMGR_QUERY_TIME_GLOBAL: Lazy<HistogramVec> = Lazy::new(|| {
1599 400 : register_histogram_vec!(
1600 400 : "pageserver_smgr_query_seconds_global",
1601 400 : "Like pageserver_smgr_query_seconds, but aggregated to instance level.",
1602 400 : &["smgr_query_type"],
1603 400 : SMGR_QUERY_TIME_GLOBAL_BUCKETS.clone(),
1604 400 : )
1605 400 : .expect("failed to define a metric")
1606 400 : });
1607 :
1608 400 : static PAGE_SERVICE_BATCH_SIZE_BUCKETS_GLOBAL: Lazy<Vec<f64>> = Lazy::new(|| {
1609 400 : (1..=u32::try_from(Timeline::MAX_GET_VECTORED_KEYS).unwrap())
1610 12800 : .map(|v| v.into())
1611 400 : .collect()
1612 400 : });
1613 :
1614 400 : static PAGE_SERVICE_BATCH_SIZE_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
1615 400 : register_histogram!(
1616 400 : "pageserver_page_service_batch_size_global",
1617 400 : "Batch size of pageserver page service requests",
1618 400 : PAGE_SERVICE_BATCH_SIZE_BUCKETS_GLOBAL.clone(),
1619 400 : )
1620 400 : .expect("failed to define a metric")
1621 400 : });
1622 :
1623 400 : static PAGE_SERVICE_BATCH_SIZE_BUCKETS_PER_TIMELINE: Lazy<Vec<f64>> = Lazy::new(|| {
1624 400 : let mut buckets = Vec::new();
1625 2800 : for i in 0.. {
1626 2800 : let bucket = 1 << i;
1627 2800 : if bucket > u32::try_from(Timeline::MAX_GET_VECTORED_KEYS).unwrap() {
1628 400 : break;
1629 2400 : }
1630 2400 : buckets.push(bucket.into());
1631 : }
1632 400 : buckets
1633 400 : });
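// The loop above yields power-of-two bucket bounds 1, 2, 4, 8, ... up to the largest
// power of two not exceeding Timeline::MAX_GET_VECTORED_KEYS; purely as an illustration,
// a limit of 32 would produce the buckets [1, 2, 4, 8, 16, 32].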
1634 :
1635 400 : static PAGE_SERVICE_BATCH_SIZE_PER_TENANT_TIMELINE: Lazy<HistogramVec> = Lazy::new(|| {
1636 400 : register_histogram_vec!(
1637 400 : "pageserver_page_service_batch_size",
1638 400 : "Batch size of pageserver page service requests",
1639 400 : &["tenant_id", "shard_id", "timeline_id"],
1640 400 : PAGE_SERVICE_BATCH_SIZE_BUCKETS_PER_TIMELINE.clone()
1641 400 : )
1642 400 : .expect("failed to define a metric")
1643 400 : });
1644 :
1645 0 : pub(crate) static PAGE_SERVICE_CONFIG_MAX_BATCH_SIZE: Lazy<IntGaugeVec> = Lazy::new(|| {
1646 0 : register_int_gauge_vec!(
1647 0 : "pageserver_page_service_config_max_batch_size",
1648 0 : "Configured maximum batch size for the server-side batching functionality of page_service. \
1649 0 : Labels expose more of the configuration parameters.",
1650 0 : &["mode", "execution"]
1651 0 : )
1652 0 : .expect("failed to define a metric")
1653 0 : });
1654 :
1655 0 : fn set_page_service_config_max_batch_size(conf: &PageServicePipeliningConfig) {
1656 0 : PAGE_SERVICE_CONFIG_MAX_BATCH_SIZE.reset();
1657 0 : let (label_values, value) = match conf {
1658 0 : PageServicePipeliningConfig::Serial => (["serial", "-"], 1),
1659 : PageServicePipeliningConfig::Pipelined(PageServicePipeliningConfigPipelined {
1660 0 : max_batch_size,
1661 0 : execution,
1662 0 : }) => {
1663 0 : let mode = "pipelined";
1664 0 : let execution = match execution {
1665 : PageServiceProtocolPipelinedExecutionStrategy::ConcurrentFutures => {
1666 0 : "concurrent-futures"
1667 : }
1668 0 : PageServiceProtocolPipelinedExecutionStrategy::Tasks => "tasks",
1669 : };
1670 0 : ([mode, execution], max_batch_size.get())
1671 : }
1672 : };
1673 0 : PAGE_SERVICE_CONFIG_MAX_BATCH_SIZE
1674 0 : .with_label_values(&label_values)
1675 0 : .set(value.try_into().unwrap());
1676 0 : }
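// Sketch of the resulting series (label values come from the match arms above; the
// max_batch_size of 32 is an arbitrary example, not a default from this file):
//
//     // pageserver_page_service_config_max_batch_size{mode="serial", execution="-"} 1
//     // pageserver_page_service_config_max_batch_size{mode="pipelined", execution="concurrent-futures"} 32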
1677 :
1678 400 : static PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS: Lazy<IntCounterVec> = Lazy::new(|| {
1679 400 : register_int_counter_vec!(
1680 400 : "pageserver_page_service_pagestream_flush_in_progress_micros",
1681 400 : "Counter that sums up the microseconds that a pagestream response was being flushed into the TCP connection. \
1682 400 : If the flush is particularly slow, this counter will be updated periodically to make slow flushes \
1683 400 : easily discoverable in monitoring. \
1684 400 : Hence, this is NOT a completion latency histogram.",
1685 400 : &["tenant_id", "shard_id", "timeline_id"],
1686 400 : )
1687 400 : .expect("failed to define a metric")
1688 400 : });
1689 :
1690 400 : static PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS_GLOBAL: Lazy<IntCounter> = Lazy::new(|| {
1691 400 : register_int_counter!(
1692 400 : "pageserver_page_service_pagestream_flush_in_progress_micros_global",
1693 400 : "Like pageserver_page_service_pagestream_flush_in_progress_seconds, but instance-wide.",
1694 400 : )
1695 400 : .expect("failed to define a metric")
1696 400 : });
1697 :
1698 400 : static PAGE_SERVICE_SMGR_BATCH_WAIT_TIME: Lazy<HistogramVec> = Lazy::new(|| {
1699 400 : register_histogram_vec!(
1700 400 : "pageserver_page_service_pagestream_batch_wait_time_seconds",
1701 400 : "Time a request spent waiting in its batch until the batch moved to throttle&execution.",
1702 400 : &["tenant_id", "shard_id", "timeline_id"],
1703 400 : SMGR_QUERY_TIME_PER_TENANT_TIMELINE_BUCKETS.into(),
1704 400 : )
1705 400 : .expect("failed to define a metric")
1706 400 : });
1707 :
1708 400 : static PAGE_SERVICE_SMGR_BATCH_WAIT_TIME_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
1709 400 : register_histogram!(
1710 400 : "pageserver_page_service_pagestream_batch_wait_time_seconds_global",
1711 400 : "Like pageserver_page_service_pagestream_batch_wait_time_seconds, but aggregated to instance level.",
1712 400 : SMGR_QUERY_TIME_GLOBAL_BUCKETS.to_vec(),
1713 400 : )
1714 400 : .expect("failed to define a metric")
1715 400 : });
1716 :
1717 : impl SmgrQueryTimePerTimeline {
1718 892 : pub(crate) fn new(
1719 892 : tenant_shard_id: &TenantShardId,
1720 892 : timeline_id: &TimelineId,
1721 892 : pagestream_throttle_metrics: Arc<tenant_throttling::Pagestream>,
1722 892 : ) -> Self {
1723 892 : let tenant_id = tenant_shard_id.tenant_id.to_string();
1724 892 : let shard_slug = format!("{}", tenant_shard_id.shard_slug());
1725 892 : let timeline_id = timeline_id.to_string();
1726 5352 : let global_started = std::array::from_fn(|i| {
1727 5352 : let op = SmgrQueryType::from_repr(i).unwrap();
1728 5352 : SMGR_QUERY_STARTED_GLOBAL
1729 5352 : .get_metric_with_label_values(&[op.into()])
1730 5352 : .unwrap()
1731 5352 : });
1732 5352 : let global_latency = std::array::from_fn(|i| {
1733 5352 : let op = SmgrQueryType::from_repr(i).unwrap();
1734 5352 : SMGR_QUERY_TIME_GLOBAL
1735 5352 : .get_metric_with_label_values(&[op.into()])
1736 5352 : .unwrap()
1737 5352 : });
1738 892 :
1739 892 : let per_timeline_getpage_started = SMGR_QUERY_STARTED_PER_TENANT_TIMELINE
1740 892 : .get_metric_with_label_values(&[
1741 892 : SmgrQueryType::GetPageAtLsn.into(),
1742 892 : &tenant_id,
1743 892 : &shard_slug,
1744 892 : &timeline_id,
1745 892 : ])
1746 892 : .unwrap();
1747 892 : let per_timeline_getpage_latency = SMGR_QUERY_TIME_PER_TENANT_TIMELINE
1748 892 : .get_metric_with_label_values(&[
1749 892 : SmgrQueryType::GetPageAtLsn.into(),
1750 892 : &tenant_id,
1751 892 : &shard_slug,
1752 892 : &timeline_id,
1753 892 : ])
1754 892 : .unwrap();
1755 892 :
1756 892 : let global_batch_size = PAGE_SERVICE_BATCH_SIZE_GLOBAL.clone();
1757 892 : let per_timeline_batch_size = PAGE_SERVICE_BATCH_SIZE_PER_TENANT_TIMELINE
1758 892 : .get_metric_with_label_values(&[&tenant_id, &shard_slug, &timeline_id])
1759 892 : .unwrap();
1760 892 :
1761 892 : let global_batch_wait_time = PAGE_SERVICE_SMGR_BATCH_WAIT_TIME_GLOBAL.clone();
1762 892 : let per_timeline_batch_wait_time = PAGE_SERVICE_SMGR_BATCH_WAIT_TIME
1763 892 : .get_metric_with_label_values(&[&tenant_id, &shard_slug, &timeline_id])
1764 892 : .unwrap();
1765 892 :
1766 892 : let global_flush_in_progress_micros =
1767 892 : PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS_GLOBAL.clone();
1768 892 : let per_timeline_flush_in_progress_micros = PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS
1769 892 : .get_metric_with_label_values(&[&tenant_id, &shard_slug, &timeline_id])
1770 892 : .unwrap();
1771 892 :
1772 892 : Self {
1773 892 : global_started,
1774 892 : global_latency,
1775 892 : per_timeline_getpage_latency,
1776 892 : per_timeline_getpage_started,
1777 892 : global_batch_size,
1778 892 : per_timeline_batch_size,
1779 892 : global_flush_in_progress_micros,
1780 892 : per_timeline_flush_in_progress_micros,
1781 892 : global_batch_wait_time,
1782 892 : per_timeline_batch_wait_time,
1783 892 : throttling: pagestream_throttle_metrics,
1784 892 : }
1785 892 : }
1786 0 : pub(crate) fn start_smgr_op(&self, op: SmgrQueryType, received_at: Instant) -> SmgrOpTimer {
1787 0 : self.global_started[op as usize].inc();
1788 :
1789 0 : let per_timeline_latency_histo = if matches!(op, SmgrQueryType::GetPageAtLsn) {
1790 0 : self.per_timeline_getpage_started.inc();
1791 0 : Some(self.per_timeline_getpage_latency.clone())
1792 : } else {
1793 0 : None
1794 : };
1795 :
1796 0 : SmgrOpTimer(Some(SmgrOpTimerInner {
1797 0 : global_execution_latency_histo: self.global_latency[op as usize].clone(),
1798 0 : per_timeline_execution_latency_histo: per_timeline_latency_histo,
1799 0 : global_flush_in_progress_micros: self.global_flush_in_progress_micros.clone(),
1800 0 : per_timeline_flush_in_progress_micros: self
1801 0 : .per_timeline_flush_in_progress_micros
1802 0 : .clone(),
1803 0 : global_batch_wait_time: self.global_batch_wait_time.clone(),
1804 0 : per_timeline_batch_wait_time: self.per_timeline_batch_wait_time.clone(),
1805 0 : throttling: self.throttling.clone(),
1806 0 : timings: SmgrOpTimerState::Received { received_at },
1807 0 : }))
1808 0 : }
1809 :
1810 : /// TODO: do something about this? seems odd, we have a similar call on SmgrOpTimer
1811 0 : pub(crate) fn observe_getpage_batch_start(&self, batch_size: usize) {
1812 0 : self.global_batch_size.observe(batch_size as f64);
1813 0 : self.per_timeline_batch_size.observe(batch_size as f64);
1814 0 : }
1815 : }
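// Illustrative call sequence for a getpage batch (the `metrics` binding, `batch`, and
// `received_at` are assumptions about the caller, not code from this file):
//
//     // metrics.observe_getpage_batch_start(batch.len());
//     // for req in &batch {
//     //     let timer = metrics.start_smgr_op(SmgrQueryType::GetPageAtLsn, req.received_at);
//     //     // the timer later receives observe_throttle_*/observe_execution_* calls,
//     //     // or balances its counters out in Drop if the request is cancelled early
//     // }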
1816 :
1817 : // keep in sync with control plane Go code so that we can validate
1818 : // compute's basebackup_ms metric with our perspective in the context of SLI/SLO.
1819 0 : static COMPUTE_STARTUP_BUCKETS: Lazy<[f64; 28]> = Lazy::new(|| {
1820 0 : // Go code uses milliseconds. Variable is called `computeStartupBuckets`
1821 0 : [
1822 0 : 5, 10, 20, 30, 50, 70, 100, 120, 150, 200, 250, 300, 350, 400, 450, 500, 600, 800, 1000,
1823 0 : 1500, 2000, 2500, 3000, 5000, 10000, 20000, 40000, 60000,
1824 0 : ]
1825 0 : .map(|ms| (ms as f64) / 1000.0)
1826 0 : });
1827 :
1828 : pub(crate) struct BasebackupQueryTime {
1829 : ok: Histogram,
1830 : error: Histogram,
1831 : client_error: Histogram,
1832 : }
1833 :
1834 0 : pub(crate) static BASEBACKUP_QUERY_TIME: Lazy<BasebackupQueryTime> = Lazy::new(|| {
1835 0 : let vec = register_histogram_vec!(
1836 0 : "pageserver_basebackup_query_seconds",
1837 0 : "Histogram of basebackup queries durations, by result type",
1838 0 : &["result"],
1839 0 : COMPUTE_STARTUP_BUCKETS.to_vec(),
1840 0 : )
1841 0 : .expect("failed to define a metric");
1842 0 : BasebackupQueryTime {
1843 0 : ok: vec.get_metric_with_label_values(&["ok"]).unwrap(),
1844 0 : error: vec.get_metric_with_label_values(&["error"]).unwrap(),
1845 0 : client_error: vec.get_metric_with_label_values(&["client_error"]).unwrap(),
1846 0 : }
1847 0 : });
1848 :
1849 : pub(crate) struct BasebackupQueryTimeOngoingRecording<'a> {
1850 : parent: &'a BasebackupQueryTime,
1851 : start: std::time::Instant,
1852 : }
1853 :
1854 : impl BasebackupQueryTime {
1855 0 : pub(crate) fn start_recording(&self) -> BasebackupQueryTimeOngoingRecording<'_> {
1856 0 : let start = Instant::now();
1857 0 : BasebackupQueryTimeOngoingRecording {
1858 0 : parent: self,
1859 0 : start,
1860 0 : }
1861 0 : }
1862 : }
1863 :
1864 : impl BasebackupQueryTimeOngoingRecording<'_> {
1865 0 : pub(crate) fn observe<T>(self, res: &Result<T, QueryError>) {
1866 0 : let elapsed = self.start.elapsed().as_secs_f64();
1867 : // If you want to change categorize of a specific error, also change it in `log_query_error`.
1868 0 : let metric = match res {
1869 0 : Ok(_) => &self.parent.ok,
1870 0 : Err(QueryError::Disconnected(ConnectionError::Io(io_error)))
1871 0 : if is_expected_io_error(io_error) =>
1872 0 : {
1873 0 : &self.parent.client_error
1874 : }
1875 0 : Err(_) => &self.parent.error,
1876 : };
1877 0 : metric.observe(elapsed);
1878 0 : }
1879 : }
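// Minimal usage sketch (`send_basebackup` is a stand-in name for the real handler):
//
//     // let recording = BASEBACKUP_QUERY_TIME.start_recording();
//     // let res = send_basebackup(&mut pgb).await;
//     // recording.observe(&res); // buckets into ok / error / client_error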
1880 :
1881 0 : pub(crate) static LIVE_CONNECTIONS: Lazy<IntCounterPairVec> = Lazy::new(|| {
1882 0 : register_int_counter_pair_vec!(
1883 0 : "pageserver_live_connections_started",
1884 0 : "Number of network connections that we started handling",
1885 0 : "pageserver_live_connections_finished",
1886 0 : "Number of network connections that we finished handling",
1887 0 : &["pageserver_connection_kind"]
1888 0 : )
1889 0 : .expect("failed to define a metric")
1890 0 : });
1891 :
1892 : #[derive(Clone, Copy, enum_map::Enum, IntoStaticStr)]
1893 : pub(crate) enum ComputeCommandKind {
1894 : PageStreamV3,
1895 : PageStreamV2,
1896 : Basebackup,
1897 : Fullbackup,
1898 : LeaseLsn,
1899 : }
1900 :
1901 : pub(crate) struct ComputeCommandCounters {
1902 : map: EnumMap<ComputeCommandKind, IntCounter>,
1903 : }
1904 :
1905 0 : pub(crate) static COMPUTE_COMMANDS_COUNTERS: Lazy<ComputeCommandCounters> = Lazy::new(|| {
1906 0 : let inner = register_int_counter_vec!(
1907 0 : "pageserver_compute_commands",
1908 0 : "Number of compute -> pageserver commands processed",
1909 0 : &["command"]
1910 0 : )
1911 0 : .expect("failed to define a metric");
1912 0 :
1913 0 : ComputeCommandCounters {
1914 0 : map: EnumMap::from_array(std::array::from_fn(|i| {
1915 0 : let command = <ComputeCommandKind as enum_map::Enum>::from_usize(i);
1916 0 : let command_str: &'static str = command.into();
1917 0 : inner.with_label_values(&[command_str])
1918 0 : })),
1919 0 : }
1920 0 : });
1921 :
1922 : impl ComputeCommandCounters {
1923 0 : pub(crate) fn for_command(&self, command: ComputeCommandKind) -> &IntCounter {
1924 0 : &self.map[command]
1925 0 : }
1926 : }
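// Usage sketch, e.g. when page_service dispatches a parsed compute command:
//
//     // COMPUTE_COMMANDS_COUNTERS
//     //     .for_command(ComputeCommandKind::Basebackup)
//     //     .inc();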
1927 :
1928 : // remote storage metrics
1929 :
1930 392 : static REMOTE_TIMELINE_CLIENT_CALLS: Lazy<IntCounterPairVec> = Lazy::new(|| {
1931 392 : register_int_counter_pair_vec!(
1932 392 : "pageserver_remote_timeline_client_calls_started",
1933 392 : "Number of started calls to remote timeline client.",
1934 392 : "pageserver_remote_timeline_client_calls_finished",
1935 392 : "Number of finshed calls to remote timeline client.",
1936 392 : &[
1937 392 : "tenant_id",
1938 392 : "shard_id",
1939 392 : "timeline_id",
1940 392 : "file_kind",
1941 392 : "op_kind"
1942 392 : ],
1943 392 : )
1944 392 : .unwrap()
1945 392 : });
1946 :
1947 : static REMOTE_TIMELINE_CLIENT_BYTES_STARTED_COUNTER: Lazy<IntCounterVec> =
1948 388 : Lazy::new(|| {
1949 388 : register_int_counter_vec!(
1950 388 : "pageserver_remote_timeline_client_bytes_started",
1951 388 : "Incremented by the number of bytes associated with a remote timeline client operation. \
1952 388 : The increment happens when the operation is scheduled.",
1953 388 : &["tenant_id", "shard_id", "timeline_id", "file_kind", "op_kind"],
1954 388 : )
1955 388 : .expect("failed to define a metric")
1956 388 : });
1957 :
1958 388 : static REMOTE_TIMELINE_CLIENT_BYTES_FINISHED_COUNTER: Lazy<IntCounterVec> = Lazy::new(|| {
1959 388 : register_int_counter_vec!(
1960 388 : "pageserver_remote_timeline_client_bytes_finished",
1961 388 : "Incremented by the number of bytes associated with a remote timeline client operation. \
1962 388 : The increment happens when the operation finishes (regardless of success/failure/shutdown).",
1963 388 : &["tenant_id", "shard_id", "timeline_id", "file_kind", "op_kind"],
1964 388 : )
1965 388 : .expect("failed to define a metric")
1966 388 : });
1967 :
1968 : pub(crate) struct TenantManagerMetrics {
1969 : tenant_slots_attached: UIntGauge,
1970 : tenant_slots_secondary: UIntGauge,
1971 : tenant_slots_inprogress: UIntGauge,
1972 : pub(crate) tenant_slot_writes: IntCounter,
1973 : pub(crate) unexpected_errors: IntCounter,
1974 : }
1975 :
1976 : impl TenantManagerMetrics {
1977 : /// Helpers for tracking slots. Note that these do not track the lifetime of TenantSlot objects
1978 : /// exactly: they track the lifetime of the slots _in the tenant map_.
1979 4 : pub(crate) fn slot_inserted(&self, slot: &TenantSlot) {
1980 4 : match slot {
1981 0 : TenantSlot::Attached(_) => {
1982 0 : self.tenant_slots_attached.inc();
1983 0 : }
1984 0 : TenantSlot::Secondary(_) => {
1985 0 : self.tenant_slots_secondary.inc();
1986 0 : }
1987 4 : TenantSlot::InProgress(_) => {
1988 4 : self.tenant_slots_inprogress.inc();
1989 4 : }
1990 : }
1991 4 : }
1992 :
1993 4 : pub(crate) fn slot_removed(&self, slot: &TenantSlot) {
1994 4 : match slot {
1995 4 : TenantSlot::Attached(_) => {
1996 4 : self.tenant_slots_attached.dec();
1997 4 : }
1998 0 : TenantSlot::Secondary(_) => {
1999 0 : self.tenant_slots_secondary.dec();
2000 0 : }
2001 0 : TenantSlot::InProgress(_) => {
2002 0 : self.tenant_slots_inprogress.dec();
2003 0 : }
2004 : }
2005 4 : }
2006 :
2007 : #[cfg(all(debug_assertions, not(test)))]
2008 0 : pub(crate) fn slots_total(&self) -> u64 {
2009 0 : self.tenant_slots_attached.get()
2010 0 : + self.tenant_slots_secondary.get()
2011 0 : + self.tenant_slots_inprogress.get()
2012 0 : }
2013 : }
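// Usage sketch for code mutating the tenant map (hypothetical call sites, shown only to
// illustrate the pairing that keeps the per-mode gauges balanced):
//
//     // TENANT_MANAGER.slot_removed(&old_slot);  // when a slot leaves the map
//     // TENANT_MANAGER.slot_inserted(&new_slot); // when a slot enters the map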
2014 :
2015 4 : pub(crate) static TENANT_MANAGER: Lazy<TenantManagerMetrics> = Lazy::new(|| {
2016 4 : let tenant_slots = register_uint_gauge_vec!(
2017 4 : "pageserver_tenant_manager_slots",
2018 4 : "How many slots currently exist, including all attached, secondary and in-progress operations",
2019 4 : &["mode"]
2020 4 : )
2021 4 : .expect("failed to define a metric");
2022 4 : TenantManagerMetrics {
2023 4 : tenant_slots_attached: tenant_slots
2024 4 : .get_metric_with_label_values(&["attached"])
2025 4 : .unwrap(),
2026 4 : tenant_slots_secondary: tenant_slots
2027 4 : .get_metric_with_label_values(&["secondary"])
2028 4 : .unwrap(),
2029 4 : tenant_slots_inprogress: tenant_slots
2030 4 : .get_metric_with_label_values(&["inprogress"])
2031 4 : .unwrap(),
2032 4 : tenant_slot_writes: register_int_counter!(
2033 4 : "pageserver_tenant_manager_slot_writes",
2034 4 : "Writes to a tenant slot, including all of create/attach/detach/delete"
2035 4 : )
2036 4 : .expect("failed to define a metric"),
2037 4 : unexpected_errors: register_int_counter!(
2038 4 : "pageserver_tenant_manager_unexpected_errors_total",
2039 4 : "Number of unexpected conditions encountered: nonzero value indicates a non-fatal bug."
2040 4 : )
2041 4 : .expect("failed to define a metric"),
2042 4 : }
2043 4 : });
2044 :
2045 : pub(crate) struct DeletionQueueMetrics {
2046 : pub(crate) keys_submitted: IntCounter,
2047 : pub(crate) keys_dropped: IntCounter,
2048 : pub(crate) keys_executed: IntCounter,
2049 : pub(crate) keys_validated: IntCounter,
2050 : pub(crate) dropped_lsn_updates: IntCounter,
2051 : pub(crate) unexpected_errors: IntCounter,
2052 : pub(crate) remote_errors: IntCounterVec,
2053 : }
2054 67 : pub(crate) static DELETION_QUEUE: Lazy<DeletionQueueMetrics> = Lazy::new(|| {
2055 67 : DeletionQueueMetrics{
2056 67 :
2057 67 : keys_submitted: register_int_counter!(
2058 67 : "pageserver_deletion_queue_submitted_total",
2059 67 : "Number of objects submitted for deletion"
2060 67 : )
2061 67 : .expect("failed to define a metric"),
2062 67 :
2063 67 : keys_dropped: register_int_counter!(
2064 67 : "pageserver_deletion_queue_dropped_total",
2065 67 : "Number of object deletions dropped due to stale generation."
2066 67 : )
2067 67 : .expect("failed to define a metric"),
2068 67 :
2069 67 : keys_executed: register_int_counter!(
2070 67 : "pageserver_deletion_queue_executed_total",
2071 67 : "Number of objects deleted. Only includes objects that we actually deleted, sum with pageserver_deletion_queue_dropped_total for the total number of keys processed to completion"
2072 67 : )
2073 67 : .expect("failed to define a metric"),
2074 67 :
2075 67 : keys_validated: register_int_counter!(
2076 67 : "pageserver_deletion_queue_validated_total",
2077 67 : "Number of keys validated for deletion. Sum with pageserver_deletion_queue_dropped_total for the total number of keys that have passed through the validation stage."
2078 67 : )
2079 67 : .expect("failed to define a metric"),
2080 67 :
2081 67 : dropped_lsn_updates: register_int_counter!(
2082 67 : "pageserver_deletion_queue_dropped_lsn_updates_total",
2083 67 : "Updates to remote_consistent_lsn dropped due to stale generation number."
2084 67 : )
2085 67 : .expect("failed to define a metric"),
2086 67 : unexpected_errors: register_int_counter!(
2087 67 : "pageserver_deletion_queue_unexpected_errors_total",
2088 67 : "Number of unexpected condiions that may stall the queue: any value above zero is unexpected."
2089 67 : )
2090 67 : .expect("failed to define a metric"),
2091 67 : remote_errors: register_int_counter_vec!(
2092 67 : "pageserver_deletion_queue_remote_errors_total",
2093 67 : "Retryable remote I/O errors while executing deletions, for example 503 responses to DeleteObjects",
2094 67 : &["op_kind"],
2095 67 : )
2096 67 : .expect("failed to define a metric")
2097 67 : }
2098 67 : });
2099 :
2100 : pub(crate) struct SecondaryModeMetrics {
2101 : pub(crate) upload_heatmap: IntCounter,
2102 : pub(crate) upload_heatmap_errors: IntCounter,
2103 : pub(crate) upload_heatmap_duration: Histogram,
2104 : pub(crate) download_heatmap: IntCounter,
2105 : pub(crate) download_layer: IntCounter,
2106 : }
2107 0 : pub(crate) static SECONDARY_MODE: Lazy<SecondaryModeMetrics> = Lazy::new(|| {
2108 0 : SecondaryModeMetrics {
2109 0 : upload_heatmap: register_int_counter!(
2110 0 : "pageserver_secondary_upload_heatmap",
2111 0 : "Number of heatmaps written to remote storage by attached tenants"
2112 0 : )
2113 0 : .expect("failed to define a metric"),
2114 0 : upload_heatmap_errors: register_int_counter!(
2115 0 : "pageserver_secondary_upload_heatmap_errors",
2116 0 : "Failures writing heatmap to remote storage"
2117 0 : )
2118 0 : .expect("failed to define a metric"),
2119 0 : upload_heatmap_duration: register_histogram!(
2120 0 : "pageserver_secondary_upload_heatmap_duration",
2121 0 : "Time to build and upload a heatmap, including any waiting inside the remote storage client"
2122 0 : )
2123 0 : .expect("failed to define a metric"),
2124 0 : download_heatmap: register_int_counter!(
2125 0 : "pageserver_secondary_download_heatmap",
2126 0 : "Number of downloads of heatmaps by secondary mode locations, including when it hasn't changed"
2127 0 : )
2128 0 : .expect("failed to define a metric"),
2129 0 : download_layer: register_int_counter!(
2130 0 : "pageserver_secondary_download_layer",
2131 0 : "Number of downloads of layers by secondary mode locations"
2132 0 : )
2133 0 : .expect("failed to define a metric"),
2134 0 : }
2135 0 : });
2136 :
2137 0 : pub(crate) static SECONDARY_RESIDENT_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
2138 0 : register_uint_gauge_vec!(
2139 0 : "pageserver_secondary_resident_physical_size",
2140 0 : "The size of the layer files present in the pageserver's filesystem, for secondary locations.",
2141 0 : &["tenant_id", "shard_id"]
2142 0 : )
2143 0 : .expect("failed to define a metric")
2144 0 : });
2145 :
2146 0 : pub(crate) static NODE_UTILIZATION_SCORE: Lazy<UIntGauge> = Lazy::new(|| {
2147 0 : register_uint_gauge!(
2148 0 : "pageserver_utilization_score",
2149 0 : "The utilization score we report to the storage controller for scheduling, where 0 is empty, 1000000 is full, and anything above is considered overloaded",
2150 0 : )
2151 0 : .expect("failed to define a metric")
2152 0 : });
2153 :
2154 0 : pub(crate) static SECONDARY_HEATMAP_TOTAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
2155 0 : register_uint_gauge_vec!(
2156 0 : "pageserver_secondary_heatmap_total_size",
2157 0 : "The total size in bytes of all layers in the most recently downloaded heatmap.",
2158 0 : &["tenant_id", "shard_id"]
2159 0 : )
2160 0 : .expect("failed to define a metric")
2161 0 : });
2162 :
2163 : #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
2164 : pub enum RemoteOpKind {
2165 : Upload,
2166 : Download,
2167 : Delete,
2168 : }
2169 : impl RemoteOpKind {
2170 30383 : pub fn as_str(&self) -> &'static str {
2171 30383 : match self {
2172 28589 : Self::Upload => "upload",
2173 104 : Self::Download => "download",
2174 1690 : Self::Delete => "delete",
2175 : }
2176 30383 : }
2177 : }
2178 :
2179 : #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
2180 : pub enum RemoteOpFileKind {
2181 : Layer,
2182 : Index,
2183 : }
2184 : impl RemoteOpFileKind {
2185 30383 : pub fn as_str(&self) -> &'static str {
2186 30383 : match self {
2187 21295 : Self::Layer => "layer",
2188 9088 : Self::Index => "index",
2189 : }
2190 30383 : }
2191 : }
2192 :
2193 388 : pub(crate) static REMOTE_OPERATION_TIME: Lazy<HistogramVec> = Lazy::new(|| {
2194 388 : register_histogram_vec!(
2195 388 : "pageserver_remote_operation_seconds",
2196 388 : "Time spent on remote storage operations. \
2197 388 : Grouped by file_kind, op_kind and status. \
2198 388 : Does not account for time spent waiting in remote timeline client's queues.",
2199 388 : &["file_kind", "op_kind", "status"]
2200 388 : )
2201 388 : .expect("failed to define a metric")
2202 388 : });
2203 :
2204 0 : pub(crate) static TENANT_TASK_EVENTS: Lazy<IntCounterVec> = Lazy::new(|| {
2205 0 : register_int_counter_vec!(
2206 0 : "pageserver_tenant_task_events",
2207 0 : "Number of task start/stop/fail events.",
2208 0 : &["event"],
2209 0 : )
2210 0 : .expect("Failed to register tenant_task_events metric")
2211 0 : });
2212 :
2213 : pub struct BackgroundLoopSemaphoreMetrics {
2214 : counters: EnumMap<BackgroundLoopKind, IntCounterPair>,
2215 : durations: EnumMap<BackgroundLoopKind, Counter>,
2216 : }
2217 :
2218 : pub(crate) static BACKGROUND_LOOP_SEMAPHORE: Lazy<BackgroundLoopSemaphoreMetrics> = Lazy::new(
2219 40 : || {
2220 40 : let counters = register_int_counter_pair_vec!(
2221 40 : "pageserver_background_loop_semaphore_wait_start_count",
2222 40 : "Counter for background loop concurrency-limiting semaphore acquire calls started",
2223 40 : "pageserver_background_loop_semaphore_wait_finish_count",
2224 40 : "Counter for background loop concurrency-limiting semaphore acquire calls finished",
2225 40 : &["task"],
2226 40 : )
2227 40 : .unwrap();
2228 40 :
2229 40 : let durations = register_counter_vec!(
2230 40 : "pageserver_background_loop_semaphore_wait_duration_seconds",
2231 40 : "Sum of wall clock time spent waiting on the background loop concurrency-limiting semaphore acquire calls",
2232 40 : &["task"],
2233 40 : )
2234 40 : .unwrap();
2235 40 :
2236 40 : BackgroundLoopSemaphoreMetrics {
2237 360 : counters: enum_map::EnumMap::from_array(std::array::from_fn(|i| {
2238 360 : let kind = <BackgroundLoopKind as enum_map::Enum>::from_usize(i);
2239 360 : counters.with_label_values(&[kind.into()])
2240 360 : })),
2241 360 : durations: enum_map::EnumMap::from_array(std::array::from_fn(|i| {
2242 360 : let kind = <BackgroundLoopKind as enum_map::Enum>::from_usize(i);
2243 360 : durations.with_label_values(&[kind.into()])
2244 360 : })),
2245 40 : }
2246 40 : },
2247 : );
2248 :
2249 : impl BackgroundLoopSemaphoreMetrics {
2250 728 : pub(crate) fn measure_acquisition(&self, task: BackgroundLoopKind) -> impl Drop + '_ {
2251 : struct Record<'a> {
2252 : metrics: &'a BackgroundLoopSemaphoreMetrics,
2253 : task: BackgroundLoopKind,
2254 : _counter_guard: metrics::IntCounterPairGuard,
2255 : start: Instant,
2256 : }
2257 : impl Drop for Record<'_> {
2258 728 : fn drop(&mut self) {
2259 728 : let elapsed = self.start.elapsed().as_secs_f64();
2260 728 : self.metrics.durations[self.task].inc_by(elapsed);
2261 728 : }
2262 : }
2263 728 : Record {
2264 728 : metrics: self,
2265 728 : task,
2266 728 : _counter_guard: self.counters[task].guard(),
2267 728 : start: Instant::now(),
2268 728 : }
2269 728 : }
2270 : }
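// Usage sketch (the semaphore handle and the concrete BackgroundLoopKind value are
// assumptions about the caller): the returned guard bumps the started/finished counter
// pair and, on drop, adds the wall-clock wait to the per-task duration counter.
//
//     // let timer = BACKGROUND_LOOP_SEMAPHORE.measure_acquisition(kind);
//     // let _permit = semaphore.acquire().await;
//     // drop(timer); // wait duration recorded here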
2271 :
2272 0 : pub(crate) static BACKGROUND_LOOP_PERIOD_OVERRUN_COUNT: Lazy<IntCounterVec> = Lazy::new(|| {
2273 0 : register_int_counter_vec!(
2274 0 : "pageserver_background_loop_period_overrun_count",
2275 0 : "Incremented whenever warn_when_period_overrun() logs a warning.",
2276 0 : &["task", "period"],
2277 0 : )
2278 0 : .expect("failed to define a metric")
2279 0 : });
2280 :
2281 : // walreceiver metrics
2282 :
2283 0 : pub(crate) static WALRECEIVER_STARTED_CONNECTIONS: Lazy<IntCounter> = Lazy::new(|| {
2284 0 : register_int_counter!(
2285 0 : "pageserver_walreceiver_started_connections_total",
2286 0 : "Number of started walreceiver connections"
2287 0 : )
2288 0 : .expect("failed to define a metric")
2289 0 : });
2290 :
2291 0 : pub(crate) static WALRECEIVER_ACTIVE_MANAGERS: Lazy<IntGauge> = Lazy::new(|| {
2292 0 : register_int_gauge!(
2293 0 : "pageserver_walreceiver_active_managers",
2294 0 : "Number of active walreceiver managers"
2295 0 : )
2296 0 : .expect("failed to define a metric")
2297 0 : });
2298 :
2299 0 : pub(crate) static WALRECEIVER_SWITCHES: Lazy<IntCounterVec> = Lazy::new(|| {
2300 0 : register_int_counter_vec!(
2301 0 : "pageserver_walreceiver_switches_total",
2302 0 : "Number of walreceiver manager change_connection calls",
2303 0 : &["reason"]
2304 0 : )
2305 0 : .expect("failed to define a metric")
2306 0 : });
2307 :
2308 0 : pub(crate) static WALRECEIVER_BROKER_UPDATES: Lazy<IntCounter> = Lazy::new(|| {
2309 0 : register_int_counter!(
2310 0 : "pageserver_walreceiver_broker_updates_total",
2311 0 : "Number of received broker updates in walreceiver"
2312 0 : )
2313 0 : .expect("failed to define a metric")
2314 0 : });
2315 :
2316 4 : pub(crate) static WALRECEIVER_CANDIDATES_EVENTS: Lazy<IntCounterVec> = Lazy::new(|| {
2317 4 : register_int_counter_vec!(
2318 4 : "pageserver_walreceiver_candidates_events_total",
2319 4 : "Number of walreceiver candidate events",
2320 4 : &["event"]
2321 4 : )
2322 4 : .expect("failed to define a metric")
2323 4 : });
2324 :
2325 : pub(crate) static WALRECEIVER_CANDIDATES_ADDED: Lazy<IntCounter> =
2326 0 : Lazy::new(|| WALRECEIVER_CANDIDATES_EVENTS.with_label_values(&["add"]));
2327 :
2328 : pub(crate) static WALRECEIVER_CANDIDATES_REMOVED: Lazy<IntCounter> =
2329 4 : Lazy::new(|| WALRECEIVER_CANDIDATES_EVENTS.with_label_values(&["remove"]));
2330 :
2331 : // Metrics collected on WAL redo operations
2332 : //
2333 : // We collect the time spent in actual WAL redo ('redo'), and time waiting
2334 : // for access to the postgres process ('wait') since there is only one for
2335 : // each tenant.
2336 :
2337 : /// Time buckets are small because we want to be able to measure the
2338 : /// smallest redo processing times. These buckets allow us to measure down
2339 : /// to 5us, which equates to 200'000 pages/sec, which equates to 1.6GB/sec.
2340 : /// This is much better than the previous 5ms aka 200 pages/sec aka 1.6MB/sec.
2341 : ///
2342 : /// Values up to 1s are recorded because metrics show that we have redo
2343 : /// durations and lock times larger than 0.250s.
2344 : macro_rules! redo_histogram_time_buckets {
2345 : () => {
2346 : vec![
2347 : 0.000_005, 0.000_010, 0.000_025, 0.000_050, 0.000_100, 0.000_250, 0.000_500, 0.001_000,
2348 : 0.002_500, 0.005_000, 0.010_000, 0.025_000, 0.050_000, 0.100_000, 0.250_000, 0.500_000,
2349 : 1.000_000,
2350 : ]
2351 : };
2352 : }
2353 :
2354 : /// While we're at it, also measure the amount of records replayed in each
2355 : /// operation. We have a global 'total replayed' counter, but that's not
2356 : /// as useful as 'what is the skew for how many records we replay in one
2357 : /// operation'.
2358 : macro_rules! redo_histogram_count_buckets {
2359 : () => {
2360 : vec![0.0, 1.0, 2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0]
2361 : };
2362 : }
2363 :
2364 : macro_rules! redo_bytes_histogram_count_buckets {
2365 : () => {
2366 : // powers of (2^.5), from 2^4.5 to 2^15 (22 buckets)
2367 : // rounded up to the next multiple of 8 to capture any MAXALIGNed record of that size, too.
2368 : vec![
2369 : 24.0, 32.0, 48.0, 64.0, 96.0, 128.0, 184.0, 256.0, 368.0, 512.0, 728.0, 1024.0, 1456.0,
2370 : 2048.0, 2904.0, 4096.0, 5800.0, 8192.0, 11592.0, 16384.0, 23176.0, 32768.0,
2371 : ]
2372 : };
2373 : }
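// Worked example of the bucket geometry above: 2^4.5 ≈ 22.6 rounds up to 24, 2^5 = 32,
// 2^5.5 ≈ 45.3 rounds up to 48, 2^6 = 64, 2^6.5 ≈ 90.5 rounds up to 96, ... up to
// 2^15 = 32768; stepping the exponent by 0.5 from 4.5 to 15 gives the 22 bounds listed.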
2374 :
2375 : pub(crate) struct WalIngestMetrics {
2376 : pub(crate) bytes_received: IntCounter,
2377 : pub(crate) records_received: IntCounter,
2378 : pub(crate) records_observed: IntCounter,
2379 : pub(crate) records_committed: IntCounter,
2380 : pub(crate) records_filtered: IntCounter,
2381 : pub(crate) gap_blocks_zeroed_on_rel_extend: IntCounter,
2382 : pub(crate) clear_vm_bits_unknown: IntCounterVec,
2383 : }
2384 :
2385 20 : pub(crate) static WAL_INGEST: Lazy<WalIngestMetrics> = Lazy::new(|| {
2386 20 : WalIngestMetrics {
2387 20 : bytes_received: register_int_counter!(
2388 20 : "pageserver_wal_ingest_bytes_received",
2389 20 : "Bytes of WAL ingested from safekeepers",
2390 20 : )
2391 20 : .unwrap(),
2392 20 : records_received: register_int_counter!(
2393 20 : "pageserver_wal_ingest_records_received",
2394 20 : "Number of WAL records received from safekeepers"
2395 20 : )
2396 20 : .expect("failed to define a metric"),
2397 20 : records_observed: register_int_counter!(
2398 20 : "pageserver_wal_ingest_records_observed",
2399 20 : "Number of WAL records observed from safekeepers. These are metadata only records for shard 0."
2400 20 : )
2401 20 : .expect("failed to define a metric"),
2402 20 : records_committed: register_int_counter!(
2403 20 : "pageserver_wal_ingest_records_committed",
2404 20 : "Number of WAL records which resulted in writes to pageserver storage"
2405 20 : )
2406 20 : .expect("failed to define a metric"),
2407 20 : records_filtered: register_int_counter!(
2408 20 : "pageserver_wal_ingest_records_filtered",
2409 20 : "Number of WAL records filtered out due to sharding"
2410 20 : )
2411 20 : .expect("failed to define a metric"),
2412 20 : gap_blocks_zeroed_on_rel_extend: register_int_counter!(
2413 20 : "pageserver_gap_blocks_zeroed_on_rel_extend",
2414 20 : "Total number of zero gap blocks written on relation extends"
2415 20 : )
2416 20 : .expect("failed to define a metric"),
2417 20 : clear_vm_bits_unknown: register_int_counter_vec!(
2418 20 : "pageserver_wal_ingest_clear_vm_bits_unknown",
2419 20 : "Number of ignored ClearVmBits operations due to unknown pages/relations",
2420 20 : &["entity"],
2421 20 : )
2422 20 : .expect("failed to define a metric"),
2423 20 : }
2424 20 : });
2425 :
2426 400 : pub(crate) static PAGESERVER_TIMELINE_WAL_RECORDS_RECEIVED: Lazy<IntCounterVec> = Lazy::new(|| {
2427 400 : register_int_counter_vec!(
2428 400 : "pageserver_timeline_wal_records_received",
2429 400 : "Number of WAL records received per shard",
2430 400 : &["tenant_id", "shard_id", "timeline_id"]
2431 400 : )
2432 400 : .expect("failed to define a metric")
2433 400 : });
2434 :
2435 12 : pub(crate) static WAL_REDO_TIME: Lazy<Histogram> = Lazy::new(|| {
2436 12 : register_histogram!(
2437 12 : "pageserver_wal_redo_seconds",
2438 12 : "Time spent on WAL redo",
2439 12 : redo_histogram_time_buckets!()
2440 12 : )
2441 12 : .expect("failed to define a metric")
2442 12 : });
2443 :
2444 12 : pub(crate) static WAL_REDO_RECORDS_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
2445 12 : register_histogram!(
2446 12 : "pageserver_wal_redo_records_histogram",
2447 12 : "Histogram of number of records replayed per redo in the Postgres WAL redo process",
2448 12 : redo_histogram_count_buckets!(),
2449 12 : )
2450 12 : .expect("failed to define a metric")
2451 12 : });
2452 :
2453 12 : pub(crate) static WAL_REDO_BYTES_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
2454 12 : register_histogram!(
2455 12 : "pageserver_wal_redo_bytes_histogram",
2456 12 : "Histogram of number of records replayed per redo sent to Postgres",
2457 12 : redo_bytes_histogram_count_buckets!(),
2458 12 : )
2459 12 : .expect("failed to define a metric")
2460 12 : });
2461 :
2462 : // FIXME: isn't this already included by WAL_REDO_RECORDS_HISTOGRAM which has _count?
2463 12 : pub(crate) static WAL_REDO_RECORD_COUNTER: Lazy<IntCounter> = Lazy::new(|| {
2464 12 : register_int_counter!(
2465 12 : "pageserver_replayed_wal_records_total",
2466 12 : "Number of WAL records replayed in WAL redo process"
2467 12 : )
2468 12 : .unwrap()
2469 12 : });
2470 :
2471 : #[rustfmt::skip]
2472 16 : pub(crate) static WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
2473 16 : register_histogram!(
2474 16 : "pageserver_wal_redo_process_launch_duration",
2475 16 : "Histogram of the duration of successful WalRedoProcess::launch calls",
2476 16 : vec![
2477 16 : 0.0002, 0.0004, 0.0006, 0.0008, 0.0010,
2478 16 : 0.0020, 0.0040, 0.0060, 0.0080, 0.0100,
2479 16 : 0.0200, 0.0400, 0.0600, 0.0800, 0.1000,
2480 16 : 0.2000, 0.4000, 0.6000, 0.8000, 1.0000,
2481 16 : 1.5000, 2.0000, 2.5000, 3.0000, 4.0000, 10.0000
2482 16 : ],
2483 16 : )
2484 16 : .expect("failed to define a metric")
2485 16 : });
2486 :
2487 : pub(crate) struct WalRedoProcessCounters {
2488 : pub(crate) started: IntCounter,
2489 : pub(crate) killed_by_cause: enum_map::EnumMap<WalRedoKillCause, IntCounter>,
2490 : pub(crate) active_stderr_logger_tasks_started: IntCounter,
2491 : pub(crate) active_stderr_logger_tasks_finished: IntCounter,
2492 : }
2493 :
2494 : #[derive(Debug, enum_map::Enum, strum_macros::IntoStaticStr)]
2495 : pub(crate) enum WalRedoKillCause {
2496 : WalRedoProcessDrop,
2497 : NoLeakChildDrop,
2498 : Startup,
2499 : }
2500 :
2501 : impl Default for WalRedoProcessCounters {
2502 16 : fn default() -> Self {
2503 16 : let started = register_int_counter!(
2504 16 : "pageserver_wal_redo_process_started_total",
2505 16 : "Number of WAL redo processes started",
2506 16 : )
2507 16 : .unwrap();
2508 16 :
2509 16 : let killed = register_int_counter_vec!(
2510 16 : "pageserver_wal_redo_process_stopped_total",
2511 16 : "Number of WAL redo processes stopped",
2512 16 : &["cause"],
2513 16 : )
2514 16 : .unwrap();
2515 16 :
2516 16 : let active_stderr_logger_tasks_started = register_int_counter!(
2517 16 : "pageserver_walredo_stderr_logger_tasks_started_total",
2518 16 : "Number of active walredo stderr logger tasks that have started",
2519 16 : )
2520 16 : .unwrap();
2521 16 :
2522 16 : let active_stderr_logger_tasks_finished = register_int_counter!(
2523 16 : "pageserver_walredo_stderr_logger_tasks_finished_total",
2524 16 : "Number of active walredo stderr logger tasks that have finished",
2525 16 : )
2526 16 : .unwrap();
2527 16 :
2528 16 : Self {
2529 16 : started,
2530 48 : killed_by_cause: EnumMap::from_array(std::array::from_fn(|i| {
2531 48 : let cause = <WalRedoKillCause as enum_map::Enum>::from_usize(i);
2532 48 : let cause_str: &'static str = cause.into();
2533 48 : killed.with_label_values(&[cause_str])
2534 48 : })),
2535 16 : active_stderr_logger_tasks_started,
2536 16 : active_stderr_logger_tasks_finished,
2537 16 : }
2538 16 : }
2539 : }
2540 :
2541 : pub(crate) static WAL_REDO_PROCESS_COUNTERS: Lazy<WalRedoProcessCounters> =
2542 : Lazy::new(WalRedoProcessCounters::default);
2543 :
2544 : /// Similar to `prometheus::HistogramTimer` but does not record on drop.
2545 : pub(crate) struct StorageTimeMetricsTimer {
2546 : metrics: StorageTimeMetrics,
2547 : start: Instant,
2548 : }
2549 :
2550 : impl StorageTimeMetricsTimer {
2551 4236 : fn new(metrics: StorageTimeMetrics) -> Self {
2552 4236 : Self {
2553 4236 : metrics,
2554 4236 : start: Instant::now(),
2555 4236 : }
2556 4236 : }
2557 :
2558 : /// Returns the elapsed duration of the timer.
2559 4236 : pub fn elapsed(&self) -> Duration {
2560 4236 : self.start.elapsed()
2561 4236 : }
2562 :
2563 : /// Record the time from creation to now and return it.
2564 4236 : pub fn stop_and_record(self) -> Duration {
2565 4236 : let duration = self.elapsed();
2566 4236 : let seconds = duration.as_secs_f64();
2567 4236 : self.metrics.timeline_sum.inc_by(seconds);
2568 4236 : self.metrics.timeline_count.inc();
2569 4236 : self.metrics.global_histogram.observe(seconds);
2570 4236 : duration
2571 4236 : }
2572 :
2573 : /// Turns this timer into one that always records on drop -- usually this means recording
2574 : /// even if an early `?` return path was taken in a function.
2575 8 : pub(crate) fn record_on_drop(self) -> AlwaysRecordingStorageTimeMetricsTimer {
2576 8 : AlwaysRecordingStorageTimeMetricsTimer(Some(self))
2577 8 : }
2578 : }
2579 :
2580 : pub(crate) struct AlwaysRecordingStorageTimeMetricsTimer(Option<StorageTimeMetricsTimer>);
2581 :
2582 : impl Drop for AlwaysRecordingStorageTimeMetricsTimer {
2583 8 : fn drop(&mut self) {
2584 8 : if let Some(inner) = self.0.take() {
2585 8 : inner.stop_and_record();
2586 8 : }
2587 8 : }
2588 : }
2589 :
2590 : impl AlwaysRecordingStorageTimeMetricsTimer {
2591 : /// Returns the elapsed duration of the timer.
2592 0 : pub fn elapsed(&self) -> Duration {
2593 0 : self.0.as_ref().expect("not dropped yet").elapsed()
2594 0 : }
2595 : }
2596 :
2597 : /// Timing facilities for a globally histogrammed metric, backed by per-tenant and
2598 : /// per-timeline total sum and count.
2599 : #[derive(Clone, Debug)]
2600 : pub(crate) struct StorageTimeMetrics {
2601 : /// Sum of f64 seconds, per operation, tenant_id and timeline_id
2602 : timeline_sum: Counter,
2603 : /// Number of operations, per operation, tenant_id and timeline_id
2604 : timeline_count: IntCounter,
2605 : /// Global histogram having only the "operation" label.
2606 : global_histogram: Histogram,
2607 : }
2608 :
2609 : impl StorageTimeMetrics {
2610 8028 : pub fn new(
2611 8028 : operation: StorageTimeOperation,
2612 8028 : tenant_id: &str,
2613 8028 : shard_id: &str,
2614 8028 : timeline_id: &str,
2615 8028 : ) -> Self {
2616 8028 : let operation: &'static str = operation.into();
2617 8028 :
2618 8028 : let timeline_sum = STORAGE_TIME_SUM_PER_TIMELINE
2619 8028 : .get_metric_with_label_values(&[operation, tenant_id, shard_id, timeline_id])
2620 8028 : .unwrap();
2621 8028 : let timeline_count = STORAGE_TIME_COUNT_PER_TIMELINE
2622 8028 : .get_metric_with_label_values(&[operation, tenant_id, shard_id, timeline_id])
2623 8028 : .unwrap();
2624 8028 : let global_histogram = STORAGE_TIME_GLOBAL
2625 8028 : .get_metric_with_label_values(&[operation])
2626 8028 : .unwrap();
2627 8028 :
2628 8028 : StorageTimeMetrics {
2629 8028 : timeline_sum,
2630 8028 : timeline_count,
2631 8028 : global_histogram,
2632 8028 : }
2633 8028 : }
2634 :
2635 : /// Starts timing a new operation.
2636 : ///
2637 : /// Note: unlike `prometheus::HistogramTimer` the returned timer does not record on drop.
2638 4236 : pub fn start_timer(&self) -> StorageTimeMetricsTimer {
2639 4236 : StorageTimeMetricsTimer::new(self.clone())
2640 4236 : }
2641 : }
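// Usage sketch (the `timeline.metrics` field is an assumption about the caller):
//
//     // let timer = timeline.metrics.compact_time_histo.start_timer();
//     // run_compaction()?; // with a plain timer, an early `?` return records nothing
//     // timer.stop_and_record();
//     //
//     // // or, to record even on early-return paths:
//     // let _guard = timeline.metrics.compact_time_histo.start_timer().record_on_drop();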
2642 :
2643 : #[derive(Debug)]
2644 : pub(crate) struct TimelineMetrics {
2645 : tenant_id: String,
2646 : shard_id: String,
2647 : timeline_id: String,
2648 : pub flush_time_histo: StorageTimeMetrics,
2649 : pub flush_delay_histo: StorageTimeMetrics,
2650 : pub flush_wait_upload_time_gauge: Gauge,
2651 : pub compact_time_histo: StorageTimeMetrics,
2652 : pub create_images_time_histo: StorageTimeMetrics,
2653 : pub logical_size_histo: StorageTimeMetrics,
2654 : pub imitate_logical_size_histo: StorageTimeMetrics,
2655 : pub load_layer_map_histo: StorageTimeMetrics,
2656 : pub garbage_collect_histo: StorageTimeMetrics,
2657 : pub find_gc_cutoffs_histo: StorageTimeMetrics,
2658 : pub last_record_lsn_gauge: IntGauge,
2659 : pub disk_consistent_lsn_gauge: IntGauge,
2660 : pub pitr_history_size: UIntGauge,
2661 : pub archival_size: UIntGauge,
2662 : pub layers_per_read: Histogram,
2663 : pub standby_horizon_gauge: IntGauge,
2664 : pub resident_physical_size_gauge: UIntGauge,
2665 : pub visible_physical_size_gauge: UIntGauge,
2666 : /// copy of LayeredTimeline.current_logical_size
2667 : pub current_logical_size_gauge: UIntGauge,
2668 : pub aux_file_size_gauge: IntGauge,
2669 : pub directory_entries_count_gauge: Lazy<UIntGauge, Box<dyn Send + Fn() -> UIntGauge>>,
2670 : pub evictions: IntCounter,
2671 : pub evictions_with_low_residence_duration: std::sync::RwLock<EvictionsWithLowResidenceDuration>,
2672 : /// Number of valid LSN leases.
2673 : pub valid_lsn_lease_count_gauge: UIntGauge,
2674 : pub wal_records_received: IntCounter,
2675 : shutdown: std::sync::atomic::AtomicBool,
2676 : }
2677 :
2678 : impl TimelineMetrics {
2679 892 : pub fn new(
2680 892 : tenant_shard_id: &TenantShardId,
2681 892 : timeline_id_raw: &TimelineId,
2682 892 : evictions_with_low_residence_duration_builder: EvictionsWithLowResidenceDurationBuilder,
2683 892 : ) -> Self {
2684 892 : let tenant_id = tenant_shard_id.tenant_id.to_string();
2685 892 : let shard_id = format!("{}", tenant_shard_id.shard_slug());
2686 892 : let timeline_id = timeline_id_raw.to_string();
2687 892 : let flush_time_histo = StorageTimeMetrics::new(
2688 892 : StorageTimeOperation::LayerFlush,
2689 892 : &tenant_id,
2690 892 : &shard_id,
2691 892 : &timeline_id,
2692 892 : );
2693 892 : let flush_delay_histo = StorageTimeMetrics::new(
2694 892 : StorageTimeOperation::LayerFlushDelay,
2695 892 : &tenant_id,
2696 892 : &shard_id,
2697 892 : &timeline_id,
2698 892 : );
2699 892 : let flush_wait_upload_time_gauge = FLUSH_WAIT_UPLOAD_TIME
2700 892 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2701 892 : .unwrap();
2702 892 : let compact_time_histo = StorageTimeMetrics::new(
2703 892 : StorageTimeOperation::Compact,
2704 892 : &tenant_id,
2705 892 : &shard_id,
2706 892 : &timeline_id,
2707 892 : );
2708 892 : let create_images_time_histo = StorageTimeMetrics::new(
2709 892 : StorageTimeOperation::CreateImages,
2710 892 : &tenant_id,
2711 892 : &shard_id,
2712 892 : &timeline_id,
2713 892 : );
2714 892 : let logical_size_histo = StorageTimeMetrics::new(
2715 892 : StorageTimeOperation::LogicalSize,
2716 892 : &tenant_id,
2717 892 : &shard_id,
2718 892 : &timeline_id,
2719 892 : );
2720 892 : let imitate_logical_size_histo = StorageTimeMetrics::new(
2721 892 : StorageTimeOperation::ImitateLogicalSize,
2722 892 : &tenant_id,
2723 892 : &shard_id,
2724 892 : &timeline_id,
2725 892 : );
2726 892 : let load_layer_map_histo = StorageTimeMetrics::new(
2727 892 : StorageTimeOperation::LoadLayerMap,
2728 892 : &tenant_id,
2729 892 : &shard_id,
2730 892 : &timeline_id,
2731 892 : );
2732 892 : let garbage_collect_histo = StorageTimeMetrics::new(
2733 892 : StorageTimeOperation::Gc,
2734 892 : &tenant_id,
2735 892 : &shard_id,
2736 892 : &timeline_id,
2737 892 : );
2738 892 : let find_gc_cutoffs_histo = StorageTimeMetrics::new(
2739 892 : StorageTimeOperation::FindGcCutoffs,
2740 892 : &tenant_id,
2741 892 : &shard_id,
2742 892 : &timeline_id,
2743 892 : );
2744 892 : let last_record_lsn_gauge = LAST_RECORD_LSN
2745 892 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2746 892 : .unwrap();
2747 892 :
2748 892 : let disk_consistent_lsn_gauge = DISK_CONSISTENT_LSN
2749 892 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2750 892 : .unwrap();
2751 892 :
2752 892 : let pitr_history_size = PITR_HISTORY_SIZE
2753 892 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2754 892 : .unwrap();
2755 892 :
2756 892 : let archival_size = TIMELINE_ARCHIVE_SIZE
2757 892 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2758 892 : .unwrap();
2759 892 :
2760 892 : let layers_per_read = LAYERS_PER_READ
2761 892 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2762 892 : .unwrap();
2763 892 :
2764 892 : let standby_horizon_gauge = STANDBY_HORIZON
2765 892 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2766 892 : .unwrap();
2767 892 : let resident_physical_size_gauge = RESIDENT_PHYSICAL_SIZE
2768 892 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2769 892 : .unwrap();
2770 892 : let visible_physical_size_gauge = VISIBLE_PHYSICAL_SIZE
2771 892 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2772 892 : .unwrap();
2773 892 : // TODO: we shouldn't expose this metric
2774 892 : let current_logical_size_gauge = CURRENT_LOGICAL_SIZE
2775 892 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2776 892 : .unwrap();
2777 892 : let aux_file_size_gauge = AUX_FILE_SIZE
2778 892 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2779 892 : .unwrap();
2780 892 : // TODO use impl Trait syntax here once we have ability to use it: https://github.com/rust-lang/rust/issues/63065
2781 892 : let directory_entries_count_gauge_closure = {
2782 892 : let tenant_shard_id = *tenant_shard_id;
2783 892 : let timeline_id_raw = *timeline_id_raw;
2784 0 : move || {
2785 0 : let tenant_id = tenant_shard_id.tenant_id.to_string();
2786 0 : let shard_id = format!("{}", tenant_shard_id.shard_slug());
2787 0 : let timeline_id = timeline_id_raw.to_string();
2788 0 : let gauge: UIntGauge = DIRECTORY_ENTRIES_COUNT
2789 0 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2790 0 : .unwrap();
2791 0 : gauge
2792 0 : }
2793 : };
2794 892 : let directory_entries_count_gauge: Lazy<UIntGauge, Box<dyn Send + Fn() -> UIntGauge>> =
2795 892 : Lazy::new(Box::new(directory_entries_count_gauge_closure));
2796 892 : let evictions = EVICTIONS
2797 892 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2798 892 : .unwrap();
2799 892 : let evictions_with_low_residence_duration = evictions_with_low_residence_duration_builder
2800 892 : .build(&tenant_id, &shard_id, &timeline_id);
2801 892 :
2802 892 : let valid_lsn_lease_count_gauge = VALID_LSN_LEASE_COUNT
2803 892 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2804 892 : .unwrap();
2805 892 :
2806 892 : let wal_records_received = PAGESERVER_TIMELINE_WAL_RECORDS_RECEIVED
2807 892 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2808 892 : .unwrap();
2809 892 :
2810 892 : TimelineMetrics {
2811 892 : tenant_id,
2812 892 : shard_id,
2813 892 : timeline_id,
2814 892 : flush_time_histo,
2815 892 : flush_delay_histo,
2816 892 : flush_wait_upload_time_gauge,
2817 892 : compact_time_histo,
2818 892 : create_images_time_histo,
2819 892 : logical_size_histo,
2820 892 : imitate_logical_size_histo,
2821 892 : garbage_collect_histo,
2822 892 : find_gc_cutoffs_histo,
2823 892 : load_layer_map_histo,
2824 892 : last_record_lsn_gauge,
2825 892 : disk_consistent_lsn_gauge,
2826 892 : pitr_history_size,
2827 892 : archival_size,
2828 892 : layers_per_read,
2829 892 : standby_horizon_gauge,
2830 892 : resident_physical_size_gauge,
2831 892 : visible_physical_size_gauge,
2832 892 : current_logical_size_gauge,
2833 892 : aux_file_size_gauge,
2834 892 : directory_entries_count_gauge,
2835 892 : evictions,
2836 892 : evictions_with_low_residence_duration: std::sync::RwLock::new(
2837 892 : evictions_with_low_residence_duration,
2838 892 : ),
2839 892 : valid_lsn_lease_count_gauge,
2840 892 : wal_records_received,
2841 892 : shutdown: std::sync::atomic::AtomicBool::default(),
2842 892 : }
2843 892 : }
2844 :
2845 3144 : pub(crate) fn record_new_file_metrics(&self, sz: u64) {
2846 3144 : self.resident_physical_size_add(sz);
2847 3144 : }
2848 :
2849 1051 : pub(crate) fn resident_physical_size_sub(&self, sz: u64) {
2850 1051 : self.resident_physical_size_gauge.sub(sz);
2851 1051 : crate::metrics::RESIDENT_PHYSICAL_SIZE_GLOBAL.sub(sz);
2852 1051 : }
2853 :
2854 3400 : pub(crate) fn resident_physical_size_add(&self, sz: u64) {
2855 3400 : self.resident_physical_size_gauge.add(sz);
2856 3400 : crate::metrics::RESIDENT_PHYSICAL_SIZE_GLOBAL.add(sz);
2857 3400 : }
2858 :
2859 20 : pub(crate) fn resident_physical_size_get(&self) -> u64 {
2860 20 : self.resident_physical_size_gauge.get()
2861 20 : }
2862 :
2863 2344 : pub(crate) fn flush_wait_upload_time_gauge_add(&self, duration: f64) {
2864 2344 : self.flush_wait_upload_time_gauge.add(duration);
2865 2344 : crate::metrics::FLUSH_WAIT_UPLOAD_TIME
2866 2344 : .get_metric_with_label_values(&[&self.tenant_id, &self.shard_id, &self.timeline_id])
2867 2344 : .unwrap()
2868 2344 : .add(duration);
2869 2344 : }
2870 :
2871 : /// Generates TIMELINE_LAYER labels for a persistent layer.
2872 5211 : fn make_layer_labels(&self, layer_desc: &PersistentLayerDesc) -> [&str; 5] {
2873 5211 : let level = match LayerMap::is_l0(&layer_desc.key_range, layer_desc.is_delta()) {
2874 2844 : true => LayerLevel::L0,
2875 2367 : false => LayerLevel::L1,
2876 : };
2877 5211 : let kind = match layer_desc.is_delta() {
2878 4355 : true => LayerKind::Delta,
2879 856 : false => LayerKind::Image,
2880 : };
2881 5211 : [
2882 5211 : &self.tenant_id,
2883 5211 : &self.shard_id,
2884 5211 : &self.timeline_id,
2885 5211 : level.into(),
2886 5211 : kind.into(),
2887 5211 : ]
2888 5211 : }
2889 :
2890 : /// Generates TIMELINE_LAYER labels for a frozen ephemeral layer.
2891 4688 : fn make_frozen_layer_labels(&self, _layer: &InMemoryLayer) -> [&str; 5] {
2892 4688 : [
2893 4688 : &self.tenant_id,
2894 4688 : &self.shard_id,
2895 4688 : &self.timeline_id,
2896 4688 : LayerLevel::Frozen.into(),
2897 4688 : LayerKind::Delta.into(), // by definition
2898 4688 : ]
2899 4688 : }
2900 :
2901 : /// Removes a frozen ephemeral layer from TIMELINE_LAYER metrics.
2902 2344 : pub fn dec_frozen_layer(&self, layer: &InMemoryLayer) {
2903 2344 : assert!(matches!(layer.info(), InMemoryLayerInfo::Frozen { .. }));
2904 2344 : let labels = self.make_frozen_layer_labels(layer);
2905 2344 : let size = layer.try_len().expect("frozen layer should have no writer");
2906 2344 : TIMELINE_LAYER_COUNT
2907 2344 : .get_metric_with_label_values(&labels)
2908 2344 : .unwrap()
2909 2344 : .dec();
2910 2344 : TIMELINE_LAYER_SIZE
2911 2344 : .get_metric_with_label_values(&labels)
2912 2344 : .unwrap()
2913 2344 : .sub(size);
2914 2344 : }
2915 :
2916 : /// Adds a frozen ephemeral layer to TIMELINE_LAYER metrics.
2917 2344 : pub fn inc_frozen_layer(&self, layer: &InMemoryLayer) {
2918 2344 : assert!(matches!(layer.info(), InMemoryLayerInfo::Frozen { .. }));
2919 2344 : let labels = self.make_frozen_layer_labels(layer);
2920 2344 : let size = layer.try_len().expect("frozen layer should have no writer");
2921 2344 : TIMELINE_LAYER_COUNT
2922 2344 : .get_metric_with_label_values(&labels)
2923 2344 : .unwrap()
2924 2344 : .inc();
2925 2344 : TIMELINE_LAYER_SIZE
2926 2344 : .get_metric_with_label_values(&labels)
2927 2344 : .unwrap()
2928 2344 : .add(size);
2929 2344 : }
2930 :
2931 : /// Removes a persistent layer from TIMELINE_LAYER metrics.
2932 1379 : pub fn dec_layer(&self, layer_desc: &PersistentLayerDesc) {
2933 1379 : let labels = self.make_layer_labels(layer_desc);
2934 1379 : TIMELINE_LAYER_COUNT
2935 1379 : .get_metric_with_label_values(&labels)
2936 1379 : .unwrap()
2937 1379 : .dec();
2938 1379 : TIMELINE_LAYER_SIZE
2939 1379 : .get_metric_with_label_values(&labels)
2940 1379 : .unwrap()
2941 1379 : .sub(layer_desc.file_size);
2942 1379 : }
2943 :
2944 : /// Adds a persistent layer to TIMELINE_LAYER metrics.
2945 3832 : pub fn inc_layer(&self, layer_desc: &PersistentLayerDesc) {
2946 3832 : let labels = self.make_layer_labels(layer_desc);
2947 3832 : TIMELINE_LAYER_COUNT
2948 3832 : .get_metric_with_label_values(&labels)
2949 3832 : .unwrap()
2950 3832 : .inc();
2951 3832 : TIMELINE_LAYER_SIZE
2952 3832 : .get_metric_with_label_values(&labels)
2953 3832 : .unwrap()
2954 3832 : .add(layer_desc.file_size);
2955 3832 : }
2956 :
2957 20 : pub(crate) fn shutdown(&self) {
2958 20 : let was_shutdown = self
2959 20 : .shutdown
2960 20 : .swap(true, std::sync::atomic::Ordering::Relaxed);
2961 20 :
2962 20 : if was_shutdown {
2963 : // This happens on tenant deletion: the tenant first shuts down its timelines, then
2964 : // invokes timeline deletion, which first shuts down the timeline again.
2965 : // TODO: this can be removed once https://github.com/neondatabase/neon/issues/5080 is fixed.
2966 0 : return;
2967 20 : }
2968 20 :
2969 20 : let tenant_id = &self.tenant_id;
2970 20 : let timeline_id = &self.timeline_id;
2971 20 : let shard_id = &self.shard_id;
2972 20 : let _ = LAST_RECORD_LSN.remove_label_values(&[tenant_id, shard_id, timeline_id]);
2973 20 : let _ = DISK_CONSISTENT_LSN.remove_label_values(&[tenant_id, shard_id, timeline_id]);
2974 20 : let _ = FLUSH_WAIT_UPLOAD_TIME.remove_label_values(&[tenant_id, shard_id, timeline_id]);
2975 20 : let _ = STANDBY_HORIZON.remove_label_values(&[tenant_id, shard_id, timeline_id]);
2976 20 : {
2977 20 : RESIDENT_PHYSICAL_SIZE_GLOBAL.sub(self.resident_physical_size_get());
2978 20 : let _ = RESIDENT_PHYSICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
2979 20 : }
2980 20 : let _ = VISIBLE_PHYSICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
2981 20 : let _ = CURRENT_LOGICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
2982 20 : if let Some(metric) = Lazy::get(&DIRECTORY_ENTRIES_COUNT) {
2983 0 : let _ = metric.remove_label_values(&[tenant_id, shard_id, timeline_id]);
2984 20 : }
2985 :
2986 20 : let _ = TIMELINE_ARCHIVE_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
2987 20 : let _ = PITR_HISTORY_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
2988 :
2989 80 : for ref level in LayerLevel::iter() {
2990 180 : for ref kind in LayerKind::iter() {
2991 120 : let labels: [&str; 5] =
2992 120 : [tenant_id, shard_id, timeline_id, level.into(), kind.into()];
2993 120 : let _ = TIMELINE_LAYER_SIZE.remove_label_values(&labels);
2994 120 : let _ = TIMELINE_LAYER_COUNT.remove_label_values(&labels);
2995 120 : }
2996 : }
2997 :
2998 20 : let _ = LAYERS_PER_READ.remove_label_values(&[tenant_id, shard_id, timeline_id]);
2999 20 :
3000 20 : let _ = EVICTIONS.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3001 20 : let _ = AUX_FILE_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3002 20 : let _ = VALID_LSN_LEASE_COUNT.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3003 20 :
3004 20 : self.evictions_with_low_residence_duration
3005 20 : .write()
3006 20 : .unwrap()
3007 20 : .remove(tenant_id, shard_id, timeline_id);
3008 :
3009 : // The following metrics are created outside of the TimelineMetrics lifecycle but are still
3010 : // removed at the end of it. The idea is that a metric outlives the shorter-lived entity
3011 : // it is observed on, e.g., the smgr metrics outlive an individual smgr connection,
3012 : // but not the timeline.
3013 :
3014 200 : for op in StorageTimeOperation::VARIANTS {
3015 180 : let _ = STORAGE_TIME_SUM_PER_TIMELINE.remove_label_values(&[
3016 180 : op,
3017 180 : tenant_id,
3018 180 : shard_id,
3019 180 : timeline_id,
3020 180 : ]);
3021 180 : let _ = STORAGE_TIME_COUNT_PER_TIMELINE.remove_label_values(&[
3022 180 : op,
3023 180 : tenant_id,
3024 180 : shard_id,
3025 180 : timeline_id,
3026 180 : ]);
3027 180 : }
3028 :
3029 60 : for op in STORAGE_IO_SIZE_OPERATIONS {
3030 40 : let _ = STORAGE_IO_SIZE.remove_label_values(&[op, tenant_id, shard_id, timeline_id]);
3031 40 : }
3032 :
3033 20 : let _ = SMGR_QUERY_STARTED_PER_TENANT_TIMELINE.remove_label_values(&[
3034 20 : SmgrQueryType::GetPageAtLsn.into(),
3035 20 : tenant_id,
3036 20 : shard_id,
3037 20 : timeline_id,
3038 20 : ]);
3039 20 : let _ = SMGR_QUERY_TIME_PER_TENANT_TIMELINE.remove_label_values(&[
3040 20 : SmgrQueryType::GetPageAtLsn.into(),
3041 20 : tenant_id,
3042 20 : shard_id,
3043 20 : timeline_id,
3044 20 : ]);
3045 20 : let _ = PAGE_SERVICE_BATCH_SIZE_PER_TENANT_TIMELINE.remove_label_values(&[
3046 20 : tenant_id,
3047 20 : shard_id,
3048 20 : timeline_id,
3049 20 : ]);
3050 20 : let _ = PAGESERVER_TIMELINE_WAL_RECORDS_RECEIVED.remove_label_values(&[
3051 20 : tenant_id,
3052 20 : shard_id,
3053 20 : timeline_id,
3054 20 : ]);
3055 20 : let _ = PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS.remove_label_values(&[
3056 20 : tenant_id,
3057 20 : shard_id,
3058 20 : timeline_id,
3059 20 : ]);
3060 20 : let _ = PAGE_SERVICE_SMGR_BATCH_WAIT_TIME.remove_label_values(&[
3061 20 : tenant_id,
3062 20 : shard_id,
3063 20 : timeline_id,
3064 20 : ]);
3065 20 : }
3066 : }
3067 :
3068 12 : pub(crate) fn remove_tenant_metrics(tenant_shard_id: &TenantShardId) {
3069 12 : // Only shard zero deals in synthetic sizes
3070 12 : if tenant_shard_id.is_shard_zero() {
3071 12 : let tid = tenant_shard_id.tenant_id.to_string();
3072 12 : let _ = TENANT_SYNTHETIC_SIZE_METRIC.remove_label_values(&[&tid]);
3073 12 : }
3074 :
3075 12 : tenant_throttling::remove_tenant_metrics(tenant_shard_id);
3076 12 :
3077 12 : // we leave the BROKEN_TENANTS_SET entry if any
3078 12 : }
3079 :
3080 : /// Maintain a per timeline gauge in addition to the global gauge.
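 : ///
 : /// The global gauge ([`REMOTE_PHYSICAL_SIZE_GLOBAL`]) is kept consistent by applying only
 : /// the delta between consecutive `set()` calls, and by subtracting the last-set value on
 : /// drop. A rough sketch of the bookkeeping, with hypothetical values:
 : ///
 : /// ```text
 : /// set(100)  // per-timeline gauge = 100, global gauge += 100
 : /// set(60)   // per-timeline gauge = 60,  global gauge -= 40
 : /// drop      // global gauge -= 60
 : /// ```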
3081 : pub(crate) struct PerTimelineRemotePhysicalSizeGauge {
3082 : last_set: AtomicU64,
3083 : gauge: UIntGauge,
3084 : }
3085 :
3086 : impl PerTimelineRemotePhysicalSizeGauge {
3087 912 : fn new(per_timeline_gauge: UIntGauge) -> Self {
3088 912 : Self {
3089 912 : last_set: AtomicU64::new(0),
3090 912 : gauge: per_timeline_gauge,
3091 912 : }
3092 912 : }
3093 3854 : pub(crate) fn set(&self, sz: u64) {
3094 3854 : self.gauge.set(sz);
3095 3854 : let prev = self.last_set.swap(sz, std::sync::atomic::Ordering::Relaxed);
3096 3854 : if sz < prev {
3097 74 : REMOTE_PHYSICAL_SIZE_GLOBAL.sub(prev - sz);
3098 3780 : } else {
3099 3780 : REMOTE_PHYSICAL_SIZE_GLOBAL.add(sz - prev);
3100 3780 : };
3101 3854 : }
3102 4 : pub(crate) fn get(&self) -> u64 {
3103 4 : self.gauge.get()
3104 4 : }
3105 : }
3106 :
3107 : impl Drop for PerTimelineRemotePhysicalSizeGauge {
3108 40 : fn drop(&mut self) {
3109 40 : REMOTE_PHYSICAL_SIZE_GLOBAL.sub(self.last_set.load(std::sync::atomic::Ordering::Relaxed));
3110 40 : }
3111 : }
3112 :
3113 : pub(crate) struct RemoteTimelineClientMetrics {
3114 : tenant_id: String,
3115 : shard_id: String,
3116 : timeline_id: String,
3117 : pub(crate) remote_physical_size_gauge: PerTimelineRemotePhysicalSizeGauge,
3118 : calls: Mutex<HashMap<(&'static str, &'static str), IntCounterPair>>,
3119 : bytes_started_counter: Mutex<HashMap<(&'static str, &'static str), IntCounter>>,
3120 : bytes_finished_counter: Mutex<HashMap<(&'static str, &'static str), IntCounter>>,
3121 : pub(crate) projected_remote_consistent_lsn_gauge: UIntGauge,
3122 : }
3123 :
3124 : impl RemoteTimelineClientMetrics {
3125 912 : pub fn new(tenant_shard_id: &TenantShardId, timeline_id: &TimelineId) -> Self {
3126 912 : let tenant_id_str = tenant_shard_id.tenant_id.to_string();
3127 912 : let shard_id_str = format!("{}", tenant_shard_id.shard_slug());
3128 912 : let timeline_id_str = timeline_id.to_string();
3129 912 :
3130 912 : let remote_physical_size_gauge = PerTimelineRemotePhysicalSizeGauge::new(
3131 912 : REMOTE_PHYSICAL_SIZE
3132 912 : .get_metric_with_label_values(&[&tenant_id_str, &shard_id_str, &timeline_id_str])
3133 912 : .unwrap(),
3134 912 : );
3135 912 :
3136 912 : let projected_remote_consistent_lsn_gauge = PROJECTED_REMOTE_CONSISTENT_LSN
3137 912 : .get_metric_with_label_values(&[&tenant_id_str, &shard_id_str, &timeline_id_str])
3138 912 : .unwrap();
3139 912 :
3140 912 : RemoteTimelineClientMetrics {
3141 912 : tenant_id: tenant_id_str,
3142 912 : shard_id: shard_id_str,
3143 912 : timeline_id: timeline_id_str,
3144 912 : calls: Mutex::new(HashMap::default()),
3145 912 : bytes_started_counter: Mutex::new(HashMap::default()),
3146 912 : bytes_finished_counter: Mutex::new(HashMap::default()),
3147 912 : remote_physical_size_gauge,
3148 912 : projected_remote_consistent_lsn_gauge,
3149 912 : }
3150 912 : }
3151 :
3152 6077 : pub fn remote_operation_time(
3153 6077 : &self,
3154 6077 : file_kind: &RemoteOpFileKind,
3155 6077 : op_kind: &RemoteOpKind,
3156 6077 : status: &'static str,
3157 6077 : ) -> Histogram {
3158 6077 : let key = (file_kind.as_str(), op_kind.as_str(), status);
3159 6077 : REMOTE_OPERATION_TIME
3160 6077 : .get_metric_with_label_values(&[key.0, key.1, key.2])
3161 6077 : .unwrap()
3162 6077 : }
3163 :
3164 14307 : fn calls_counter_pair(
3165 14307 : &self,
3166 14307 : file_kind: &RemoteOpFileKind,
3167 14307 : op_kind: &RemoteOpKind,
3168 14307 : ) -> IntCounterPair {
3169 14307 : let mut guard = self.calls.lock().unwrap();
3170 14307 : let key = (file_kind.as_str(), op_kind.as_str());
3171 14307 : let metric = guard.entry(key).or_insert_with(move || {
3172 1634 : REMOTE_TIMELINE_CLIENT_CALLS
3173 1634 : .get_metric_with_label_values(&[
3174 1634 : &self.tenant_id,
3175 1634 : &self.shard_id,
3176 1634 : &self.timeline_id,
3177 1634 : key.0,
3178 1634 : key.1,
3179 1634 : ])
3180 1634 : .unwrap()
3181 14307 : });
3182 14307 : metric.clone()
3183 14307 : }
3184 :
3185 3456 : fn bytes_started_counter(
3186 3456 : &self,
3187 3456 : file_kind: &RemoteOpFileKind,
3188 3456 : op_kind: &RemoteOpKind,
3189 3456 : ) -> IntCounter {
3190 3456 : let mut guard = self.bytes_started_counter.lock().unwrap();
3191 3456 : let key = (file_kind.as_str(), op_kind.as_str());
3192 3456 : let metric = guard.entry(key).or_insert_with(move || {
3193 644 : REMOTE_TIMELINE_CLIENT_BYTES_STARTED_COUNTER
3194 644 : .get_metric_with_label_values(&[
3195 644 : &self.tenant_id,
3196 644 : &self.shard_id,
3197 644 : &self.timeline_id,
3198 644 : key.0,
3199 644 : key.1,
3200 644 : ])
3201 644 : .unwrap()
3202 3456 : });
3203 3456 : metric.clone()
3204 3456 : }
3205 :
3206 6519 : fn bytes_finished_counter(
3207 6519 : &self,
3208 6519 : file_kind: &RemoteOpFileKind,
3209 6519 : op_kind: &RemoteOpKind,
3210 6519 : ) -> IntCounter {
3211 6519 : let mut guard = self.bytes_finished_counter.lock().unwrap();
3212 6519 : let key = (file_kind.as_str(), op_kind.as_str());
3213 6519 : let metric = guard.entry(key).or_insert_with(move || {
3214 644 : REMOTE_TIMELINE_CLIENT_BYTES_FINISHED_COUNTER
3215 644 : .get_metric_with_label_values(&[
3216 644 : &self.tenant_id,
3217 644 : &self.shard_id,
3218 644 : &self.timeline_id,
3219 644 : key.0,
3220 644 : key.1,
3221 644 : ])
3222 644 : .unwrap()
3223 6519 : });
3224 6519 : metric.clone()
3225 6519 : }
3226 : }
3227 :
3228 : #[cfg(test)]
3229 : impl RemoteTimelineClientMetrics {
3230 12 : pub fn get_bytes_started_counter_value(
3231 12 : &self,
3232 12 : file_kind: &RemoteOpFileKind,
3233 12 : op_kind: &RemoteOpKind,
3234 12 : ) -> Option<u64> {
3235 12 : let guard = self.bytes_started_counter.lock().unwrap();
3236 12 : let key = (file_kind.as_str(), op_kind.as_str());
3237 12 : guard.get(&key).map(|counter| counter.get())
3238 12 : }
3239 :
3240 12 : pub fn get_bytes_finished_counter_value(
3241 12 : &self,
3242 12 : file_kind: &RemoteOpFileKind,
3243 12 : op_kind: &RemoteOpKind,
3244 12 : ) -> Option<u64> {
3245 12 : let guard = self.bytes_finished_counter.lock().unwrap();
3246 12 : let key = (file_kind.as_str(), op_kind.as_str());
3247 12 : guard.get(&key).map(|counter| counter.get())
3248 12 : }
3249 : }
3250 :
3251 : /// See [`RemoteTimelineClientMetrics::call_begin`].
3252 : #[must_use]
3253 : pub(crate) struct RemoteTimelineClientCallMetricGuard {
3254 : /// Decremented on drop.
3255 : calls_counter_pair: Option<IntCounterPair>,
3256 : /// If Some(), this references the bytes_finished metric, and we increment it by the given `u64` on drop.
3257 : bytes_finished: Option<(IntCounter, u64)>,
3258 : }
3259 :
3260 : impl RemoteTimelineClientCallMetricGuard {
3261 : /// Consume this guard object without performing the metric updates it would do on `drop()`.
3262 : /// The caller vouches to do the metric updates manually.
3263 7555 : pub fn will_decrement_manually(mut self) {
3264 7555 : let RemoteTimelineClientCallMetricGuard {
3265 7555 : calls_counter_pair,
3266 7555 : bytes_finished,
3267 7555 : } = &mut self;
3268 7555 : calls_counter_pair.take();
3269 7555 : bytes_finished.take();
3270 7555 : }
3271 : }
3272 :
3273 : impl Drop for RemoteTimelineClientCallMetricGuard {
3274 7607 : fn drop(&mut self) {
3275 7607 : let RemoteTimelineClientCallMetricGuard {
3276 7607 : calls_counter_pair,
3277 7607 : bytes_finished,
3278 7607 : } = self;
3279 7607 : if let Some(guard) = calls_counter_pair.take() {
3280 52 : guard.dec();
3281 7555 : }
3282 7607 : if let Some((bytes_finished_metric, value)) = bytes_finished {
3283 0 : bytes_finished_metric.inc_by(*value);
3284 7607 : }
3285 7607 : }
3286 : }
3287 :
3288 : /// The enum variants communicate to the [`RemoteTimelineClientMetrics`] whether to
3289 : /// track the byte size of this call in applicable metric(s).
3290 : pub(crate) enum RemoteTimelineClientMetricsCallTrackSize {
3291 : /// Do not account for this call's byte size in any metrics.
3292 : /// The `reason` field is there to make the call sites self-documenting
3293 : /// about why they don't need the metric.
3294 : DontTrackSize { reason: &'static str },
3295 : /// Track the byte size of the call in applicable metric(s).
3296 : Bytes(u64),
3297 : }
3298 :
3299 : impl RemoteTimelineClientMetrics {
3300 : /// Update the metrics that change when a call to the remote timeline client instance starts.
3301 : ///
3302 : /// Drop the returned guard object once the operation is finished to updates corresponding metrics that track completions.
3303 : /// Drop the returned guard object once the operation is finished, to update the corresponding metrics that track completions.
3304 : /// is more suitable.
3305 : /// Never do both.
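 : ///
 : /// A rough usage sketch (illustrative only; `metrics`, `file_kind`, `op_kind` and
 : /// `num_bytes` are hypothetical bindings, not defined in this module):
 : ///
 : /// ```ignore
 : /// let guard = metrics.call_begin(
 : ///     &file_kind,
 : ///     &op_kind,
 : ///     RemoteTimelineClientMetricsCallTrackSize::Bytes(num_bytes),
 : /// );
 : /// // ... perform the remote operation ...
 : /// drop(guard); // decrements the in-flight call counter and adds `num_bytes` to bytes_finished
 : /// ```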
3306 7607 : pub(crate) fn call_begin(
3307 7607 : &self,
3308 7607 : file_kind: &RemoteOpFileKind,
3309 7607 : op_kind: &RemoteOpKind,
3310 7607 : size: RemoteTimelineClientMetricsCallTrackSize,
3311 7607 : ) -> RemoteTimelineClientCallMetricGuard {
3312 7607 : let calls_counter_pair = self.calls_counter_pair(file_kind, op_kind);
3313 7607 : calls_counter_pair.inc();
3314 :
3315 7607 : let bytes_finished = match size {
3316 4151 : RemoteTimelineClientMetricsCallTrackSize::DontTrackSize { reason: _reason } => {
3317 4151 : // nothing to do
3318 4151 : None
3319 : }
3320 3456 : RemoteTimelineClientMetricsCallTrackSize::Bytes(size) => {
3321 3456 : self.bytes_started_counter(file_kind, op_kind).inc_by(size);
3322 3456 : let finished_counter = self.bytes_finished_counter(file_kind, op_kind);
3323 3456 : Some((finished_counter, size))
3324 : }
3325 : };
3326 7607 : RemoteTimelineClientCallMetricGuard {
3327 7607 : calls_counter_pair: Some(calls_counter_pair),
3328 7607 : bytes_finished,
3329 7607 : }
3330 7607 : }
3331 :
3332 : /// Manually update the metrics that track completions, instead of using the guard object.
3333 : /// Using the guard object is generally preferable.
3334 : /// See [`call_begin`](Self::call_begin) for more context.
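 : ///
 : /// A rough sketch of the manual path (illustrative only; `metrics`, `file_kind` and
 : /// `op_kind` are hypothetical bindings):
 : ///
 : /// ```ignore
 : /// let guard = metrics.call_begin(
 : ///     &file_kind,
 : ///     &op_kind,
 : ///     RemoteTimelineClientMetricsCallTrackSize::DontTrackSize { reason: "illustration" },
 : /// );
 : /// guard.will_decrement_manually();
 : /// // ... later, when the operation actually completes ...
 : /// metrics.call_end(
 : ///     &file_kind,
 : ///     &op_kind,
 : ///     RemoteTimelineClientMetricsCallTrackSize::DontTrackSize { reason: "illustration" },
 : /// );
 : /// ```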
3335 6700 : pub(crate) fn call_end(
3336 6700 : &self,
3337 6700 : file_kind: &RemoteOpFileKind,
3338 6700 : op_kind: &RemoteOpKind,
3339 6700 : size: RemoteTimelineClientMetricsCallTrackSize,
3340 6700 : ) {
3341 6700 : let calls_counter_pair = self.calls_counter_pair(file_kind, op_kind);
3342 6700 : calls_counter_pair.dec();
3343 6700 : match size {
3344 3637 : RemoteTimelineClientMetricsCallTrackSize::DontTrackSize { reason: _reason } => {}
3345 3063 : RemoteTimelineClientMetricsCallTrackSize::Bytes(size) => {
3346 3063 : self.bytes_finished_counter(file_kind, op_kind).inc_by(size);
3347 3063 : }
3348 : }
3349 6700 : }
3350 : }
3351 :
3352 : impl Drop for RemoteTimelineClientMetrics {
3353 40 : fn drop(&mut self) {
3354 40 : let RemoteTimelineClientMetrics {
3355 40 : tenant_id,
3356 40 : shard_id,
3357 40 : timeline_id,
3358 40 : remote_physical_size_gauge,
3359 40 : calls,
3360 40 : bytes_started_counter,
3361 40 : bytes_finished_counter,
3362 40 : projected_remote_consistent_lsn_gauge,
3363 40 : } = self;
3364 48 : for ((a, b), _) in calls.get_mut().unwrap().drain() {
3365 48 : let mut res = [Ok(()), Ok(())];
3366 48 : REMOTE_TIMELINE_CLIENT_CALLS
3367 48 : .remove_label_values(&mut res, &[tenant_id, shard_id, timeline_id, a, b]);
3368 48 : // don't care about results
3369 48 : }
3370 40 : for ((a, b), _) in bytes_started_counter.get_mut().unwrap().drain() {
3371 12 : let _ = REMOTE_TIMELINE_CLIENT_BYTES_STARTED_COUNTER.remove_label_values(&[
3372 12 : tenant_id,
3373 12 : shard_id,
3374 12 : timeline_id,
3375 12 : a,
3376 12 : b,
3377 12 : ]);
3378 12 : }
3379 40 : for ((a, b), _) in bytes_finished_counter.get_mut().unwrap().drain() {
3380 12 : let _ = REMOTE_TIMELINE_CLIENT_BYTES_FINISHED_COUNTER.remove_label_values(&[
3381 12 : tenant_id,
3382 12 : shard_id,
3383 12 : timeline_id,
3384 12 : a,
3385 12 : b,
3386 12 : ]);
3387 12 : }
3388 40 : let _ = remote_physical_size_gauge; // used to avoid an 'unused' warning in the destructuring above
3389 40 : let _ = remote_physical_size_gauge; // use to avoid 'unused' warning in desctructuring above
3390 40 : let _ = REMOTE_PHYSICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3391 40 : }
3392 40 : {
3393 40 : let _ = projected_remote_consistent_lsn_gauge;
3394 40 : let _ = PROJECTED_REMOTE_CONSISTENT_LSN.remove_label_values(&[
3395 40 : tenant_id,
3396 40 : shard_id,
3397 40 : timeline_id,
3398 40 : ]);
3399 40 : }
3400 40 : }
3401 : }
3402 :
3403 : /// Wrapper future that measures the time spent by a remote storage operation,
3404 : /// and records the time and success/failure as a prometheus metric.
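 : ///
 : /// A rough usage sketch (illustrative only; the wrapped future and the `file_kind`,
 : /// `op_kind`, and `metrics` bindings are hypothetical):
 : ///
 : /// ```ignore
 : /// let res = download_some_object()
 : ///     .measure_remote_op(file_kind, op_kind, Arc::clone(&metrics))
 : ///     .await;
 : /// ```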
3405 : pub(crate) trait MeasureRemoteOp: Sized {
3406 6360 : fn measure_remote_op(
3407 6360 : self,
3408 6360 : file_kind: RemoteOpFileKind,
3409 6360 : op: RemoteOpKind,
3410 6360 : metrics: Arc<RemoteTimelineClientMetrics>,
3411 6360 : ) -> MeasuredRemoteOp<Self> {
3412 6360 : let start = Instant::now();
3413 6360 : MeasuredRemoteOp {
3414 6360 : inner: self,
3415 6360 : file_kind,
3416 6360 : op,
3417 6360 : start,
3418 6360 : metrics,
3419 6360 : }
3420 6360 : }
3421 : }
3422 :
3423 : impl<T: Sized> MeasureRemoteOp for T {}
3424 :
3425 : pin_project! {
3426 : pub(crate) struct MeasuredRemoteOp<F>
3427 : {
3428 : #[pin]
3429 : inner: F,
3430 : file_kind: RemoteOpFileKind,
3431 : op: RemoteOpKind,
3432 : start: Instant,
3433 : metrics: Arc<RemoteTimelineClientMetrics>,
3434 : }
3435 : }
3436 :
3437 : impl<F: Future<Output = Result<O, E>>, O, E> Future for MeasuredRemoteOp<F> {
3438 : type Output = Result<O, E>;
3439 :
3440 97334 : fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
3441 97334 : let this = self.project();
3442 97334 : let poll_result = this.inner.poll(cx);
3443 97334 : if let Poll::Ready(ref res) = poll_result {
3444 6077 : let duration = this.start.elapsed();
3445 6077 : let status = if res.is_ok() { &"success" } else { &"failure" };
3446 6077 : this.metrics
3447 6077 : .remote_operation_time(this.file_kind, this.op, status)
3448 6077 : .observe(duration.as_secs_f64());
3449 91257 : }
3450 97334 : poll_result
3451 97334 : }
3452 : }
3453 :
3454 : pub mod tokio_epoll_uring {
3455 : use std::{
3456 : collections::HashMap,
3457 : sync::{Arc, Mutex},
3458 : };
3459 :
3460 : use metrics::{register_histogram, register_int_counter, Histogram, LocalHistogram, UIntGauge};
3461 : use once_cell::sync::Lazy;
3462 :
3463 : /// Shared storage for tokio-epoll-uring thread local metrics.
3464 : pub(crate) static THREAD_LOCAL_METRICS_STORAGE: Lazy<ThreadLocalMetricsStorage> =
3465 232 : Lazy::new(|| {
3466 232 : let slots_submission_queue_depth = register_histogram!(
3467 232 : "pageserver_tokio_epoll_uring_slots_submission_queue_depth",
3468 232 : "The slots waiters queue depth of each tokio_epoll_uring system",
3469 232 : vec![1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0],
3470 232 : )
3471 232 : .expect("failed to define a metric");
3472 232 : ThreadLocalMetricsStorage {
3473 232 : observers: Mutex::new(HashMap::new()),
3474 232 : slots_submission_queue_depth,
3475 232 : }
3476 232 : });
3477 :
3478 : pub struct ThreadLocalMetricsStorage {
3479 : /// List of thread local metrics observers.
3480 : observers: Mutex<HashMap<u64, Arc<ThreadLocalMetrics>>>,
3481 : /// A histogram shared between all thread local systems
3482 : /// for collecting slots submission queue depth.
3483 : slots_submission_queue_depth: Histogram,
3484 : }
3485 :
3486 : /// Each thread-local [`tokio_epoll_uring::System`] gets one of these as its
3487 : /// [`tokio_epoll_uring::metrics::PerSystemMetrics`] generic.
3488 : ///
3489 : /// The System makes observations into [`Self`] and periodically, the collector
3490 : /// comes along and flushes [`Self`] into the shared storage [`THREAD_LOCAL_METRICS_STORAGE`].
3491 : ///
3492 : /// [`LocalHistogram`] is `!Send`, so we need to put it behind a [`Mutex`].
3493 : /// But except for the periodic flush, the lock is uncontended, so there's no waiting
3494 : /// for the cache coherence protocol to get an exclusive cache line.
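 : ///
 : /// A rough sketch of the intended lifecycle (illustrative; `system_id` and `depth` are
 : /// hypothetical values):
 : ///
 : /// ```ignore
 : /// let local = THREAD_LOCAL_METRICS_STORAGE.register_system(system_id);
 : /// local.observe_slots_submission_queue_depth(depth);          // hot path, thread-local
 : /// THREAD_LOCAL_METRICS_STORAGE.flush_thread_local_metrics();  // periodic, done by the collector
 : /// THREAD_LOCAL_METRICS_STORAGE.remove_system(system_id);      // before the system is dropped
 : /// ```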
3495 : pub struct ThreadLocalMetrics {
3496 : /// Local observer of thread local tokio-epoll-uring system's slots waiters queue depth.
3497 : slots_submission_queue_depth: Mutex<LocalHistogram>,
3498 : }
3499 :
3500 : impl ThreadLocalMetricsStorage {
3501 : /// Registers a new thread local system. Returns a thread local metrics observer.
3502 993 : pub fn register_system(&self, id: u64) -> Arc<ThreadLocalMetrics> {
3503 993 : let per_system_metrics = Arc::new(ThreadLocalMetrics::new(
3504 993 : self.slots_submission_queue_depth.local(),
3505 993 : ));
3506 993 : let mut g = self.observers.lock().unwrap();
3507 993 : g.insert(id, Arc::clone(&per_system_metrics));
3508 993 : per_system_metrics
3509 993 : }
3510 :
3511 : /// Removes metrics observer for a thread local system.
3512 : /// This should be called before dropping a thread local system.
3513 232 : pub fn remove_system(&self, id: u64) {
3514 232 : let mut g = self.observers.lock().unwrap();
3515 232 : g.remove(&id);
3516 232 : }
3517 :
3518 : /// Flush all thread local metrics to the shared storage.
3519 0 : pub fn flush_thread_local_metrics(&self) {
3520 0 : let g = self.observers.lock().unwrap();
3521 0 : g.values().for_each(|local| {
3522 0 : local.flush();
3523 0 : });
3524 0 : }
3525 : }
3526 :
3527 : impl ThreadLocalMetrics {
3528 993 : pub fn new(slots_submission_queue_depth: LocalHistogram) -> Self {
3529 993 : ThreadLocalMetrics {
3530 993 : slots_submission_queue_depth: Mutex::new(slots_submission_queue_depth),
3531 993 : }
3532 993 : }
3533 :
3534 : /// Flushes the thread local metrics to shared aggregator.
3535 0 : pub fn flush(&self) {
3536 0 : let Self {
3537 0 : slots_submission_queue_depth,
3538 0 : } = self;
3539 0 : slots_submission_queue_depth.lock().unwrap().flush();
3540 0 : }
3541 : }
3542 :
3543 : impl tokio_epoll_uring::metrics::PerSystemMetrics for ThreadLocalMetrics {
3544 1819474 : fn observe_slots_submission_queue_depth(&self, queue_depth: u64) {
3545 1819474 : let Self {
3546 1819474 : slots_submission_queue_depth,
3547 1819474 : } = self;
3548 1819474 : slots_submission_queue_depth
3549 1819474 : .lock()
3550 1819474 : .unwrap()
3551 1819474 : .observe(queue_depth as f64);
3552 1819474 : }
3553 : }
3554 :
3555 : pub struct Collector {
3556 : descs: Vec<metrics::core::Desc>,
3557 : systems_created: UIntGauge,
3558 : systems_destroyed: UIntGauge,
3559 : thread_local_metrics_storage: &'static ThreadLocalMetricsStorage,
3560 : }
3561 :
3562 : impl metrics::core::Collector for Collector {
3563 0 : fn desc(&self) -> Vec<&metrics::core::Desc> {
3564 0 : self.descs.iter().collect()
3565 0 : }
3566 :
3567 0 : fn collect(&self) -> Vec<metrics::proto::MetricFamily> {
3568 0 : let mut mfs = Vec::with_capacity(Self::NMETRICS);
3569 0 : let tokio_epoll_uring::metrics::GlobalMetrics {
3570 0 : systems_created,
3571 0 : systems_destroyed,
3572 0 : } = tokio_epoll_uring::metrics::global();
3573 0 : self.systems_created.set(systems_created);
3574 0 : mfs.extend(self.systems_created.collect());
3575 0 : self.systems_destroyed.set(systems_destroyed);
3576 0 : mfs.extend(self.systems_destroyed.collect());
3577 0 :
3578 0 : self.thread_local_metrics_storage
3579 0 : .flush_thread_local_metrics();
3580 0 :
3581 0 : mfs.extend(
3582 0 : self.thread_local_metrics_storage
3583 0 : .slots_submission_queue_depth
3584 0 : .collect(),
3585 0 : );
3586 0 : mfs
3587 0 : }
3588 : }
3589 :
3590 : impl Collector {
3591 : const NMETRICS: usize = 3;
3592 :
3593 : #[allow(clippy::new_without_default)]
3594 0 : pub fn new() -> Self {
3595 0 : let mut descs = Vec::new();
3596 0 :
3597 0 : let systems_created = UIntGauge::new(
3598 0 : "pageserver_tokio_epoll_uring_systems_created",
3599 0 : "counter of tokio-epoll-uring systems that were created",
3600 0 : )
3601 0 : .unwrap();
3602 0 : descs.extend(
3603 0 : metrics::core::Collector::desc(&systems_created)
3604 0 : .into_iter()
3605 0 : .cloned(),
3606 0 : );
3607 0 :
3608 0 : let systems_destroyed = UIntGauge::new(
3609 0 : "pageserver_tokio_epoll_uring_systems_destroyed",
3610 0 : "counter of tokio-epoll-uring systems that were destroyed",
3611 0 : )
3612 0 : .unwrap();
3613 0 : descs.extend(
3614 0 : metrics::core::Collector::desc(&systems_destroyed)
3615 0 : .into_iter()
3616 0 : .cloned(),
3617 0 : );
3618 0 :
3619 0 : Self {
3620 0 : descs,
3621 0 : systems_created,
3622 0 : systems_destroyed,
3623 0 : thread_local_metrics_storage: &THREAD_LOCAL_METRICS_STORAGE,
3624 0 : }
3625 0 : }
3626 : }
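 :
 : // A hedged registration sketch: the `Collector` above implements `metrics::core::Collector`,
 : // so it is meant to be registered with a prometheus-style registry. Assuming such a
 : // `registry` value is available (how it is obtained is outside this file):
 : //
 : //   registry.register(Box::new(Collector::new())).unwrap();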
3627 :
3628 232 : pub(crate) static THREAD_LOCAL_LAUNCH_SUCCESSES: Lazy<metrics::IntCounter> = Lazy::new(|| {
3629 232 : register_int_counter!(
3630 232 : "pageserver_tokio_epoll_uring_pageserver_thread_local_launch_success_count",
3631 232 : "Number of times where thread_local_system creation spanned multiple executor threads",
3632 232 : )
3633 232 : .unwrap()
3634 232 : });
3635 :
3636 0 : pub(crate) static THREAD_LOCAL_LAUNCH_FAILURES: Lazy<metrics::IntCounter> = Lazy::new(|| {
3637 0 : register_int_counter!(
3638 0 : "pageserver_tokio_epoll_uring_pageserver_thread_local_launch_failures_count",
3639 0 : "Number of times thread_local_system creation failed and was retried after back-off.",
3640 0 : )
3641 0 : .unwrap()
3642 0 : });
3643 : }
3644 :
3645 : pub(crate) mod tenant_throttling {
3646 : use metrics::{register_int_counter_vec, IntCounter};
3647 : use once_cell::sync::Lazy;
3648 : use utils::shard::TenantShardId;
3649 :
3650 : pub(crate) struct GlobalAndPerTenantIntCounter {
3651 : global: IntCounter,
3652 : per_tenant: IntCounter,
3653 : }
3654 :
3655 : impl GlobalAndPerTenantIntCounter {
3656 : #[inline(always)]
3657 0 : pub(crate) fn inc(&self) {
3658 0 : self.inc_by(1)
3659 0 : }
3660 : #[inline(always)]
3661 0 : pub(crate) fn inc_by(&self, n: u64) {
3662 0 : self.global.inc_by(n);
3663 0 : self.per_tenant.inc_by(n);
3664 0 : }
3665 : }
3666 :
3667 : pub(crate) struct Metrics<const KIND: usize> {
3668 : pub(super) count_accounted_start: GlobalAndPerTenantIntCounter,
3669 : pub(super) count_accounted_finish: GlobalAndPerTenantIntCounter,
3670 : pub(super) wait_time: GlobalAndPerTenantIntCounter,
3671 : pub(super) count_throttled: GlobalAndPerTenantIntCounter,
3672 : }
3673 :
3674 404 : static COUNT_ACCOUNTED_START: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3675 404 : register_int_counter_vec!(
3676 404 : "pageserver_tenant_throttling_count_accounted_start_global",
3677 404 : "Count of tenant throttling starts, by kind of throttle.",
3678 404 : &["kind"]
3679 404 : )
3680 404 : .unwrap()
3681 404 : });
3682 404 : static COUNT_ACCOUNTED_START_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3683 404 : register_int_counter_vec!(
3684 404 : "pageserver_tenant_throttling_count_accounted_start",
3685 404 : "Count of tenant throttling starts, by kind of throttle.",
3686 404 : &["kind", "tenant_id", "shard_id"]
3687 404 : )
3688 404 : .unwrap()
3689 404 : });
3690 404 : static COUNT_ACCOUNTED_FINISH: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3691 404 : register_int_counter_vec!(
3692 404 : "pageserver_tenant_throttling_count_accounted_finish_global",
3693 404 : "Count of tenant throttling finishes, by kind of throttle.",
3694 404 : &["kind"]
3695 404 : )
3696 404 : .unwrap()
3697 404 : });
3698 404 : static COUNT_ACCOUNTED_FINISH_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3699 404 : register_int_counter_vec!(
3700 404 : "pageserver_tenant_throttling_count_accounted_finish",
3701 404 : "Count of tenant throttling finishes, by kind of throttle.",
3702 404 : &["kind", "tenant_id", "shard_id"]
3703 404 : )
3704 404 : .unwrap()
3705 404 : });
3706 404 : static WAIT_USECS: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3707 404 : register_int_counter_vec!(
3708 404 : "pageserver_tenant_throttling_wait_usecs_sum_global",
3709 404 : "Sum of microseconds spent waiting on the throttle, by kind of throttle.",
3710 404 : &["kind"]
3711 404 : )
3712 404 : .unwrap()
3713 404 : });
3714 404 : static WAIT_USECS_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3715 404 : register_int_counter_vec!(
3716 404 : "pageserver_tenant_throttling_wait_usecs_sum",
3717 404 : "Sum of microseconds spent waiting on the throttle, by kind of throttle.",
3718 404 : &["kind", "tenant_id", "shard_id"]
3719 404 : )
3720 404 : .unwrap()
3721 404 : });
3722 :
3723 404 : static WAIT_COUNT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3724 404 : register_int_counter_vec!(
3725 404 : "pageserver_tenant_throttling_count_global",
3726 404 : "Count of tenant throttlings, by kind of throttle.",
3727 404 : &["kind"]
3728 404 : )
3729 404 : .unwrap()
3730 404 : });
3731 404 : static WAIT_COUNT_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3732 404 : register_int_counter_vec!(
3733 404 : "pageserver_tenant_throttling_count",
3734 404 : "Count of tenant throttlings, by kind of throttle.",
3735 404 : &["kind", "tenant_id", "shard_id"]
3736 404 : )
3737 404 : .unwrap()
3738 404 : });
3739 :
3740 : const KINDS: &[&str] = &["pagestream"];
3741 : pub type Pagestream = Metrics<0>;
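 :
 : // The const generic `KIND` indexes into `KINDS`, keeping the metric label and the type alias
 : // in sync. A hedged sketch of how an additional throttle kind could be wired up (hypothetical;
 : // no such kind exists in this file):
 : //
 : //   const KINDS: &[&str] = &["pagestream", "some-other-kind"];
 : //   pub type SomeOtherKind = Metrics<1>;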
3742 :
3743 : impl<const KIND: usize> Metrics<KIND> {
3744 440 : pub(crate) fn new(tenant_shard_id: &TenantShardId) -> Self {
3745 440 : let per_tenant_label_values = &[
3746 440 : KINDS[KIND],
3747 440 : &tenant_shard_id.tenant_id.to_string(),
3748 440 : &tenant_shard_id.shard_slug().to_string(),
3749 440 : ];
3750 440 : Metrics {
3751 440 : count_accounted_start: {
3752 440 : GlobalAndPerTenantIntCounter {
3753 440 : global: COUNT_ACCOUNTED_START.with_label_values(&[KINDS[KIND]]),
3754 440 : per_tenant: COUNT_ACCOUNTED_START_PER_TENANT
3755 440 : .with_label_values(per_tenant_label_values),
3756 440 : }
3757 440 : },
3758 440 : count_accounted_finish: {
3759 440 : GlobalAndPerTenantIntCounter {
3760 440 : global: COUNT_ACCOUNTED_FINISH.with_label_values(&[KINDS[KIND]]),
3761 440 : per_tenant: COUNT_ACCOUNTED_FINISH_PER_TENANT
3762 440 : .with_label_values(per_tenant_label_values),
3763 440 : }
3764 440 : },
3765 440 : wait_time: {
3766 440 : GlobalAndPerTenantIntCounter {
3767 440 : global: WAIT_USECS.with_label_values(&[KINDS[KIND]]),
3768 440 : per_tenant: WAIT_USECS_PER_TENANT
3769 440 : .with_label_values(per_tenant_label_values),
3770 440 : }
3771 440 : },
3772 440 : count_throttled: {
3773 440 : GlobalAndPerTenantIntCounter {
3774 440 : global: WAIT_COUNT.with_label_values(&[KINDS[KIND]]),
3775 440 : per_tenant: WAIT_COUNT_PER_TENANT
3776 440 : .with_label_values(per_tenant_label_values),
3777 440 : }
3778 440 : },
3779 440 : }
3780 440 : }
3781 : }
3782 :
3783 0 : pub(crate) fn preinitialize_global_metrics() {
3784 0 : Lazy::force(&COUNT_ACCOUNTED_START);
3785 0 : Lazy::force(&COUNT_ACCOUNTED_FINISH);
3786 0 : Lazy::force(&WAIT_USECS);
3787 0 : Lazy::force(&WAIT_COUNT);
3788 0 : }
3789 :
3790 12 : pub(crate) fn remove_tenant_metrics(tenant_shard_id: &TenantShardId) {
3791 48 : for m in &[
3792 12 : &COUNT_ACCOUNTED_START_PER_TENANT,
3793 12 : &COUNT_ACCOUNTED_FINISH_PER_TENANT,
3794 12 : &WAIT_USECS_PER_TENANT,
3795 12 : &WAIT_COUNT_PER_TENANT,
3796 12 : ] {
3797 96 : for kind in KINDS {
3798 48 : let _ = m.remove_label_values(&[
3799 48 : kind,
3800 48 : &tenant_shard_id.tenant_id.to_string(),
3801 48 : &tenant_shard_id.shard_slug().to_string(),
3802 48 : ]);
3803 48 : }
3804 : }
3805 12 : }
3806 : }
3807 :
3808 : pub(crate) mod disk_usage_based_eviction {
3809 : use super::*;
3810 :
3811 : pub(crate) struct Metrics {
3812 : pub(crate) tenant_collection_time: Histogram,
3813 : pub(crate) tenant_layer_count: Histogram,
3814 : pub(crate) layers_collected: IntCounter,
3815 : pub(crate) layers_selected: IntCounter,
3816 : pub(crate) layers_evicted: IntCounter,
3817 : }
3818 :
3819 : impl Default for Metrics {
3820 0 : fn default() -> Self {
3821 0 : let tenant_collection_time = register_histogram!(
3822 0 : "pageserver_disk_usage_based_eviction_tenant_collection_seconds",
3823 0 : "Time spent collecting layers from a tenant -- not normalized by collected layer amount",
3824 0 : vec![0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1.0, 5.0, 10.0]
3825 0 : )
3826 0 : .unwrap();
3827 0 :
3828 0 : let tenant_layer_count = register_histogram!(
3829 0 : "pageserver_disk_usage_based_eviction_tenant_collected_layers",
3830 0 : "Amount of layers gathered from a tenant",
3831 0 : vec![5.0, 50.0, 500.0, 5000.0, 50000.0]
3832 0 : )
3833 0 : .unwrap();
3834 0 :
3835 0 : let layers_collected = register_int_counter!(
3836 0 : "pageserver_disk_usage_based_eviction_collected_layers_total",
3837 0 : "Amount of layers collected"
3838 0 : )
3839 0 : .unwrap();
3840 0 :
3841 0 : let layers_selected = register_int_counter!(
3842 0 : "pageserver_disk_usage_based_eviction_select_layers_total",
3843 0 : "Amount of layers selected"
3844 0 : )
3845 0 : .unwrap();
3846 0 :
3847 0 : let layers_evicted = register_int_counter!(
3848 0 : "pageserver_disk_usage_based_eviction_evicted_layers_total",
3849 0 : "Amount of layers successfully evicted"
3850 0 : )
3851 0 : .unwrap();
3852 0 :
3853 0 : Self {
3854 0 : tenant_collection_time,
3855 0 : tenant_layer_count,
3856 0 : layers_collected,
3857 0 : layers_selected,
3858 0 : layers_evicted,
3859 0 : }
3860 0 : }
3861 : }
3862 :
3863 : pub(crate) static METRICS: Lazy<Metrics> = Lazy::new(Metrics::default);
3864 : }
3865 :
3866 392 : static TOKIO_EXECUTOR_THREAD_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
3867 392 : register_uint_gauge_vec!(
3868 392 : "pageserver_tokio_executor_thread_configured_count",
3869 392 : "Total number of configured tokio executor threads in the process.
3870 392 : The `setup` label denotes whether we're running with multiple runtimes or a single runtime.",
3871 392 : &["setup"],
3872 392 : )
3873 392 : .unwrap()
3874 392 : });
3875 :
3876 392 : pub(crate) fn set_tokio_runtime_setup(setup: &str, num_threads: NonZeroUsize) {
3877 : static SERIALIZE: std::sync::Mutex<()> = std::sync::Mutex::new(());
3878 392 : let _guard = SERIALIZE.lock().unwrap();
3879 392 : TOKIO_EXECUTOR_THREAD_COUNT.reset();
3880 392 : TOKIO_EXECUTOR_THREAD_COUNT
3881 392 : .get_metric_with_label_values(&[setup])
3882 392 : .unwrap()
3883 392 : .set(u64::try_from(num_threads.get()).unwrap());
3884 392 : }
3885 :
3886 0 : pub fn preinitialize_metrics(conf: &'static PageServerConf) {
3887 0 : set_page_service_config_max_batch_size(&conf.page_service_pipelining);
3888 0 :
3889 0 : // Python tests need these, and we alert on some of them.
3890 0 : //
3891 0 : // FIXME(4813): make it so that we have no top level metrics as this fn will easily fall out of
3892 0 : // order:
3893 0 : // - global metrics reside in a Lazy<PageserverMetrics>
3894 0 : // - access via crate::metrics::PS_METRICS.some_metric.inc()
3895 0 : // - could move the statics into TimelineMetrics::new()?
3896 0 :
3897 0 : // counters
3898 0 : [
3899 0 : &UNEXPECTED_ONDEMAND_DOWNLOADS,
3900 0 : &WALRECEIVER_STARTED_CONNECTIONS,
3901 0 : &WALRECEIVER_BROKER_UPDATES,
3902 0 : &WALRECEIVER_CANDIDATES_ADDED,
3903 0 : &WALRECEIVER_CANDIDATES_REMOVED,
3904 0 : &tokio_epoll_uring::THREAD_LOCAL_LAUNCH_FAILURES,
3905 0 : &tokio_epoll_uring::THREAD_LOCAL_LAUNCH_SUCCESSES,
3906 0 : &REMOTE_ONDEMAND_DOWNLOADED_LAYERS,
3907 0 : &REMOTE_ONDEMAND_DOWNLOADED_BYTES,
3908 0 : &CIRCUIT_BREAKERS_BROKEN,
3909 0 : &CIRCUIT_BREAKERS_UNBROKEN,
3910 0 : &PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS_GLOBAL,
3911 0 : ]
3912 0 : .into_iter()
3913 0 : .for_each(|c| {
3914 0 : Lazy::force(c);
3915 0 : });
3916 0 :
3917 0 : // Deletion queue stats
3918 0 : Lazy::force(&DELETION_QUEUE);
3919 0 :
3920 0 : // Tenant stats
3921 0 : Lazy::force(&TENANT);
3922 0 :
3923 0 : // Tenant manager stats
3924 0 : Lazy::force(&TENANT_MANAGER);
3925 0 :
3926 0 : Lazy::force(&crate::tenant::storage_layer::layer::LAYER_IMPL_METRICS);
3927 0 : Lazy::force(&disk_usage_based_eviction::METRICS);
3928 :
3929 0 : for state_name in pageserver_api::models::TenantState::VARIANTS {
3930 0 : // initialize the metric for all gauges, otherwise the time series might seemingly show
3931 0 : // values from last restart.
3932 0 : TENANT_STATE_METRIC.with_label_values(&[state_name]).set(0);
3933 0 : }
3934 :
3935 : // countervecs
3936 0 : [
3937 0 : &BACKGROUND_LOOP_PERIOD_OVERRUN_COUNT,
3938 0 : &SMGR_QUERY_STARTED_GLOBAL,
3939 0 : ]
3940 0 : .into_iter()
3941 0 : .for_each(|c| {
3942 0 : Lazy::force(c);
3943 0 : });
3944 0 :
3945 0 : // gauges
3946 0 : WALRECEIVER_ACTIVE_MANAGERS.get();
3947 0 :
3948 0 : // histograms
3949 0 : [
3950 0 : &LAYERS_PER_READ_GLOBAL,
3951 0 : &DELTAS_PER_READ_GLOBAL,
3952 0 : &WAIT_LSN_TIME,
3953 0 : &WAL_REDO_TIME,
3954 0 : &WAL_REDO_RECORDS_HISTOGRAM,
3955 0 : &WAL_REDO_BYTES_HISTOGRAM,
3956 0 : &WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM,
3957 0 : &PAGE_SERVICE_BATCH_SIZE_GLOBAL,
3958 0 : &PAGE_SERVICE_SMGR_BATCH_WAIT_TIME_GLOBAL,
3959 0 : ]
3960 0 : .into_iter()
3961 0 : .for_each(|h| {
3962 0 : Lazy::force(h);
3963 0 : });
3964 0 :
3965 0 : // Custom
3966 0 : Lazy::force(&BASEBACKUP_QUERY_TIME);
3967 0 : Lazy::force(&COMPUTE_COMMANDS_COUNTERS);
3968 0 : Lazy::force(&tokio_epoll_uring::THREAD_LOCAL_METRICS_STORAGE);
3969 0 :
3970 0 : tenant_throttling::preinitialize_global_metrics();
3971 0 : }
|