Line data Source code
1 : use std::collections::HashMap;
2 : use std::num::NonZeroUsize;
3 : use std::os::fd::RawFd;
4 : use std::pin::Pin;
5 : use std::sync::atomic::AtomicU64;
6 : use std::sync::{Arc, Mutex};
7 : use std::task::{Context, Poll};
8 : use std::time::{Duration, Instant};
9 :
10 : use enum_map::{Enum as _, EnumMap};
11 : use futures::Future;
12 : use metrics::{
13 : Counter, CounterVec, Gauge, GaugeVec, Histogram, HistogramVec, IntCounter, IntCounterPair,
14 : IntCounterPairVec, IntCounterVec, IntGauge, IntGaugeVec, UIntGauge, UIntGaugeVec,
15 : register_counter_vec, register_gauge_vec, register_histogram, register_histogram_vec,
16 : register_int_counter, register_int_counter_pair_vec, register_int_counter_vec,
17 : register_int_gauge, register_int_gauge_vec, register_uint_gauge, register_uint_gauge_vec,
18 : };
19 : use once_cell::sync::Lazy;
20 : use pageserver_api::config::{
21 : PageServicePipeliningConfig, PageServicePipeliningConfigPipelined,
22 : PageServiceProtocolPipelinedExecutionStrategy,
23 : };
24 : use pageserver_api::models::InMemoryLayerInfo;
25 : use pageserver_api::shard::TenantShardId;
26 : use pin_project_lite::pin_project;
27 : use postgres_backend::{QueryError, is_expected_io_error};
28 : use pq_proto::framed::ConnectionError;
29 : use strum::{EnumCount, IntoEnumIterator as _, VariantNames};
30 : use strum_macros::{IntoStaticStr, VariantNames};
31 : use utils::id::TimelineId;
32 :
33 : use crate::config::PageServerConf;
34 : use crate::context::{PageContentKind, RequestContext};
35 : use crate::pgdatadir_mapping::DatadirModificationStats;
36 : use crate::task_mgr::TaskKind;
37 : use crate::tenant::Timeline;
38 : use crate::tenant::layer_map::LayerMap;
39 : use crate::tenant::mgr::TenantSlot;
40 : use crate::tenant::storage_layer::{InMemoryLayer, PersistentLayerDesc};
41 : use crate::tenant::tasks::BackgroundLoopKind;
42 : use crate::tenant::throttle::ThrottleResult;
43 :
44 : /// Prometheus histogram buckets (in seconds) for operations in the critical
45 : /// path. In other words, operations that directly affect the latency of user
46 : /// queries.
47 : ///
48 : /// The buckets capture the majority of latencies in the microsecond and
49 : /// millisecond range but also extend far enough up to distinguish "bad" from
50 : /// "really bad".
51 : const CRITICAL_OP_BUCKETS: &[f64] = &[
52 : 0.000_001, 0.000_010, 0.000_100, // 1 us, 10 us, 100 us
53 : 0.001_000, 0.010_000, 0.100_000, // 1 ms, 10 ms, 100 ms
54 : 1.0, 10.0, 100.0, // 1 s, 10 s, 100 s
55 : ];
56 :
57 : // Metrics collected on operations on the storage repository.
58 : #[derive(Debug, VariantNames, IntoStaticStr)]
59 : #[strum(serialize_all = "kebab_case")]
60 : pub(crate) enum StorageTimeOperation {
61 : #[strum(serialize = "layer flush")]
62 : LayerFlush,
63 :
64 : #[strum(serialize = "layer flush delay")]
65 : LayerFlushDelay,
66 :
67 : #[strum(serialize = "compact")]
68 : Compact,
69 :
70 : #[strum(serialize = "create images")]
71 : CreateImages,
72 :
73 : #[strum(serialize = "logical size")]
74 : LogicalSize,
75 :
76 : #[strum(serialize = "imitate logical size")]
77 : ImitateLogicalSize,
78 :
79 : #[strum(serialize = "load layer map")]
80 : LoadLayerMap,
81 :
82 : #[strum(serialize = "gc")]
83 : Gc,
84 :
85 : #[strum(serialize = "find gc cutoffs")]
86 : FindGcCutoffs,
87 : }
88 :
89 412 : pub(crate) static STORAGE_TIME_SUM_PER_TIMELINE: Lazy<CounterVec> = Lazy::new(|| {
90 412 : register_counter_vec!(
91 412 : "pageserver_storage_operations_seconds_sum",
92 412 : "Total time spent on storage operations with operation, tenant and timeline dimensions",
93 412 : &["operation", "tenant_id", "shard_id", "timeline_id"],
94 412 : )
95 412 : .expect("failed to define a metric")
96 412 : });
97 :
98 412 : pub(crate) static STORAGE_TIME_COUNT_PER_TIMELINE: Lazy<IntCounterVec> = Lazy::new(|| {
99 412 : register_int_counter_vec!(
100 412 : "pageserver_storage_operations_seconds_count",
101 412 : "Count of storage operations with operation, tenant and timeline dimensions",
102 412 : &["operation", "tenant_id", "shard_id", "timeline_id"],
103 412 : )
104 412 : .expect("failed to define a metric")
105 412 : });
106 :
107 : // Buckets for background operation duration in seconds, like compaction, GC, size calculation.
108 : const STORAGE_OP_BUCKETS: &[f64] = &[0.010, 0.100, 1.0, 10.0, 100.0, 1000.0];
109 :
110 412 : pub(crate) static STORAGE_TIME_GLOBAL: Lazy<HistogramVec> = Lazy::new(|| {
111 412 : register_histogram_vec!(
112 412 : "pageserver_storage_operations_seconds_global",
113 412 : "Time spent on storage operations",
114 412 : &["operation"],
115 412 : STORAGE_OP_BUCKETS.into(),
116 412 : )
117 412 : .expect("failed to define a metric")
118 412 : });
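
// Illustrative sketch (hypothetical caller, not an actual pageserver code path):
// recording one `StorageTimeOperation` into the per-timeline sum/count pair and
// the global histogram defined above. The tenant/shard/timeline label values
// here are made up for the example.
#[allow(dead_code)]
fn record_storage_op_example(op: StorageTimeOperation, elapsed: Duration) {
    let op_label: &'static str = op.into(); // via the IntoStaticStr derive
    let labels = [op_label, "example-tenant", "0001", "example-timeline"];
    STORAGE_TIME_SUM_PER_TIMELINE
        .with_label_values(&labels)
        .inc_by(elapsed.as_secs_f64());
    STORAGE_TIME_COUNT_PER_TIMELINE
        .with_label_values(&labels)
        .inc();
    STORAGE_TIME_GLOBAL
        .with_label_values(&[op_label])
        .observe(elapsed.as_secs_f64());
}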
119 :
120 : /// Measures layers visited per read (i.e. read amplification).
121 : ///
122 : /// NB: for a batch, we count all visited layers towards each read. While the cost of layer visits
123 : /// is amortized across the batch, and some layers may not intersect with a given key, each visited
124 : /// layer contributes directly to the observed latency for every read in the batch, which is what we
125 : /// care about.
126 412 : pub(crate) static LAYERS_PER_READ: Lazy<HistogramVec> = Lazy::new(|| {
127 412 : register_histogram_vec!(
128 412 : "pageserver_layers_per_read",
129 412 : "Layers visited to serve a single read (read amplification). In a batch, all visited layers count towards every read.",
130 412 : &["tenant_id", "shard_id", "timeline_id"],
131 412 : // Low resolution to reduce cardinality.
132 412 : vec![4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0],
133 412 : )
134 412 : .expect("failed to define a metric")
135 412 : });
136 :
137 404 : pub(crate) static LAYERS_PER_READ_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
138 404 : register_histogram!(
139 404 : "pageserver_layers_per_read_global",
140 404 : "Layers visited to serve a single read (read amplification). In a batch, all visited layers count towards every read.",
141 404 : vec![1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0],
142 404 : )
143 404 : .expect("failed to define a metric")
144 404 : });
145 :
146 404 : pub(crate) static LAYERS_PER_READ_BATCH_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
147 404 : register_histogram!(
148 404 : "pageserver_layers_per_read_batch_global",
149 404 : "Layers visited to serve a single read batch (read amplification), regardless of number of reads.",
150 404 : vec![
151 404 : 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0
152 404 : ],
153 404 : )
154 404 : .expect("failed to define a metric")
155 404 : });
156 :
157 404 : pub(crate) static LAYERS_PER_READ_AMORTIZED_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
158 404 : register_histogram!(
159 404 : "pageserver_layers_per_read_amortized_global",
160 404 : "Layers visited to serve a single read (read amplification). Amortized across a batch: \
161 404 : all visited layers are divided by number of reads.",
162 404 : vec![
163 404 : 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0
164 404 : ],
165 404 : )
166 404 : .expect("failed to define a metric")
167 404 : });
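
// Worked example (hypothetical numbers): a batch of 4 reads that visited 12
// layers in total feeds the histograms above as follows. The per-timeline
// LAYERS_PER_READ vec would receive the same per-read observations, with its
// tenant/shard/timeline labels.
#[allow(dead_code)]
fn observe_read_amplification_example() {
    let layers_visited = 12_u64;
    let reads_in_batch = 4_u64;
    for _ in 0..reads_in_batch {
        // Every read in the batch pays for all 12 visited layers.
        LAYERS_PER_READ_GLOBAL.observe(layers_visited as f64);
        // Amortized: layers divided by the number of reads, 12 / 4 = 3.
        LAYERS_PER_READ_AMORTIZED_GLOBAL
            .observe(layers_visited as f64 / reads_in_batch as f64);
    }
    // The batch as a whole is counted once, regardless of how many reads it held.
    LAYERS_PER_READ_BATCH_GLOBAL.observe(layers_visited as f64);
}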
168 :
169 404 : pub(crate) static DELTAS_PER_READ_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
170 404 : // We expect this to be low because of Postgres checkpoints. Let's see if that holds.
171 404 : register_histogram!(
172 404 : "pageserver_deltas_per_read_global",
173 404 : "Number of delta pages applied to image page per read",
174 404 : vec![0.0, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0],
175 404 : )
176 404 : .expect("failed to define a metric")
177 404 : });
178 :
179 0 : pub(crate) static CONCURRENT_INITDBS: Lazy<UIntGauge> = Lazy::new(|| {
180 0 : register_uint_gauge!(
181 0 : "pageserver_concurrent_initdb",
182 0 : "Number of initdb processes running"
183 0 : )
184 0 : .expect("failed to define a metric")
185 0 : });
186 :
187 0 : pub(crate) static INITDB_SEMAPHORE_ACQUISITION_TIME: Lazy<Histogram> = Lazy::new(|| {
188 0 : register_histogram!(
189 0 : "pageserver_initdb_semaphore_seconds_global",
190 0 : "Time spent getting a permit from the global initdb semaphore",
191 0 : STORAGE_OP_BUCKETS.into()
192 0 : )
193 0 : .expect("failed to define metric")
194 0 : });
195 :
196 0 : pub(crate) static INITDB_RUN_TIME: Lazy<Histogram> = Lazy::new(|| {
197 0 : register_histogram!(
198 0 : "pageserver_initdb_seconds_global",
199 0 : "Time spent performing initdb",
200 0 : STORAGE_OP_BUCKETS.into()
201 0 : )
202 0 : .expect("failed to define metric")
203 0 : });
204 :
205 : pub(crate) struct GetVectoredLatency {
206 : map: EnumMap<TaskKind, Option<Histogram>>,
207 : }
208 :
209 : #[allow(dead_code)]
210 : pub(crate) struct ScanLatency {
211 : map: EnumMap<TaskKind, Option<Histogram>>,
212 : }
213 :
214 : impl GetVectoredLatency {
215 : // Only these task types perform vectored gets. Filter all other tasks out to reduce total
216 : // cardinality of the metric.
217 : const TRACKED_TASK_KINDS: [TaskKind; 2] = [TaskKind::Compaction, TaskKind::PageRequestHandler];
218 :
219 39432 : pub(crate) fn for_task_kind(&self, task_kind: TaskKind) -> Option<&Histogram> {
220 39432 : self.map[task_kind].as_ref()
221 39432 : }
222 : }
223 :
224 : impl ScanLatency {
225 : // Only these task types perform scans. Filter all other tasks out to reduce total
226 : // cardinality of the metric.
227 : const TRACKED_TASK_KINDS: [TaskKind; 1] = [TaskKind::PageRequestHandler];
228 :
229 24 : pub(crate) fn for_task_kind(&self, task_kind: TaskKind) -> Option<&Histogram> {
230 24 : self.map[task_kind].as_ref()
231 24 : }
232 : }
233 :
234 : pub(crate) struct ScanLatencyOngoingRecording<'a> {
235 : parent: &'a Histogram,
236 : start: std::time::Instant,
237 : }
238 :
239 : impl<'a> ScanLatencyOngoingRecording<'a> {
240 0 : pub(crate) fn start_recording(parent: &'a Histogram) -> ScanLatencyOngoingRecording<'a> {
241 0 : let start = Instant::now();
242 0 : ScanLatencyOngoingRecording { parent, start }
243 0 : }
244 :
245 0 : pub(crate) fn observe(self) {
246 0 : let elapsed = self.start.elapsed();
247 0 : self.parent.observe(elapsed.as_secs_f64());
248 0 : }
249 : }
250 :
251 396 : pub(crate) static GET_VECTORED_LATENCY: Lazy<GetVectoredLatency> = Lazy::new(|| {
252 396 : let inner = register_histogram_vec!(
253 396 : "pageserver_get_vectored_seconds",
254 396 : "Time spent in get_vectored.",
255 396 : &["task_kind"],
256 396 : CRITICAL_OP_BUCKETS.into(),
257 396 : )
258 396 : .expect("failed to define a metric");
259 396 :
260 396 : GetVectoredLatency {
261 12276 : map: EnumMap::from_array(std::array::from_fn(|task_kind_idx| {
262 12276 : let task_kind = TaskKind::from_usize(task_kind_idx);
263 12276 :
264 12276 : if GetVectoredLatency::TRACKED_TASK_KINDS.contains(&task_kind) {
265 792 : let task_kind = task_kind.into();
266 792 : Some(inner.with_label_values(&[task_kind]))
267 : } else {
268 11484 : None
269 : }
270 12276 : })),
271 396 : }
272 396 : });
273 :
274 8 : pub(crate) static SCAN_LATENCY: Lazy<ScanLatency> = Lazy::new(|| {
275 8 : let inner = register_histogram_vec!(
276 8 : "pageserver_scan_seconds",
277 8 : "Time spent in scan.",
278 8 : &["task_kind"],
279 8 : CRITICAL_OP_BUCKETS.into(),
280 8 : )
281 8 : .expect("failed to define a metric");
282 8 :
283 8 : ScanLatency {
284 248 : map: EnumMap::from_array(std::array::from_fn(|task_kind_idx| {
285 248 : let task_kind = TaskKind::from_usize(task_kind_idx);
286 248 :
287 248 : if ScanLatency::TRACKED_TASK_KINDS.contains(&task_kind) {
288 8 : let task_kind = task_kind.into();
289 8 : Some(inner.with_label_values(&[task_kind]))
290 : } else {
291 240 : None
292 : }
293 248 : })),
294 8 : }
295 8 : });
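
// Illustrative sketch (hypothetical call sites): how a request handler might
// use the two lazies above. `elapsed` is assumed to be an already-measured
// duration; for task kinds outside TRACKED_TASK_KINDS, `for_task_kind`
// returns None and nothing is recorded.
#[allow(dead_code)]
fn observe_vectored_get_example(task_kind: TaskKind, elapsed: Duration) {
    if let Some(histogram) = GET_VECTORED_LATENCY.for_task_kind(task_kind) {
        histogram.observe(elapsed.as_secs_f64());
    }
}

// Scans can instead use the guard defined above, which measures the time from
// `start_recording` to `observe`.
#[allow(dead_code)]
fn observe_scan_example(task_kind: TaskKind) {
    if let Some(histogram) = SCAN_LATENCY.for_task_kind(task_kind) {
        let recording = ScanLatencyOngoingRecording::start_recording(histogram);
        // ... perform the scan ...
        recording.observe();
    }
}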
296 :
297 : pub(crate) struct PageCacheMetricsForTaskKind {
298 : pub read_accesses_immutable: IntCounter,
299 : pub read_hits_immutable: IntCounter,
300 : }
301 :
302 : pub(crate) struct PageCacheMetrics {
303 : map: EnumMap<TaskKind, EnumMap<PageContentKind, PageCacheMetricsForTaskKind>>,
304 : }
305 :
306 188 : static PAGE_CACHE_READ_HITS: Lazy<IntCounterVec> = Lazy::new(|| {
307 188 : register_int_counter_vec!(
308 188 : "pageserver_page_cache_read_hits_total",
309 188 : "Number of read accesses to the page cache that hit",
310 188 : &["task_kind", "key_kind", "content_kind", "hit_kind"]
311 188 : )
312 188 : .expect("failed to define a metric")
313 188 : });
314 :
315 188 : static PAGE_CACHE_READ_ACCESSES: Lazy<IntCounterVec> = Lazy::new(|| {
316 188 : register_int_counter_vec!(
317 188 : "pageserver_page_cache_read_accesses_total",
318 188 : "Number of read accesses to the page cache",
319 188 : &["task_kind", "key_kind", "content_kind"]
320 188 : )
321 188 : .expect("failed to define a metric")
322 188 : });
323 :
324 188 : pub(crate) static PAGE_CACHE: Lazy<PageCacheMetrics> = Lazy::new(|| PageCacheMetrics {
325 5828 : map: EnumMap::from_array(std::array::from_fn(|task_kind| {
326 5828 : let task_kind = TaskKind::from_usize(task_kind);
327 5828 : let task_kind: &'static str = task_kind.into();
328 46624 : EnumMap::from_array(std::array::from_fn(|content_kind| {
329 46624 : let content_kind = PageContentKind::from_usize(content_kind);
330 46624 : let content_kind: &'static str = content_kind.into();
331 46624 : PageCacheMetricsForTaskKind {
332 46624 : read_accesses_immutable: {
333 46624 : PAGE_CACHE_READ_ACCESSES
334 46624 : .get_metric_with_label_values(&[task_kind, "immutable", content_kind])
335 46624 : .unwrap()
336 46624 : },
337 46624 :
338 46624 : read_hits_immutable: {
339 46624 : PAGE_CACHE_READ_HITS
340 46624 : .get_metric_with_label_values(&[task_kind, "immutable", content_kind, "-"])
341 46624 : .unwrap()
342 46624 : },
343 46624 : }
344 46624 : }))
345 5828 : })),
346 188 : });
347 :
348 : impl PageCacheMetrics {
349 1942576 : pub(crate) fn for_ctx(&self, ctx: &RequestContext) -> &PageCacheMetricsForTaskKind {
350 1942576 : &self.map[ctx.task_kind()][ctx.page_content_kind()]
351 1942576 : }
352 : }
353 :
354 : pub(crate) struct PageCacheSizeMetrics {
355 : pub max_bytes: UIntGauge,
356 :
357 : pub current_bytes_immutable: UIntGauge,
358 : }
359 :
360 188 : static PAGE_CACHE_SIZE_CURRENT_BYTES: Lazy<UIntGaugeVec> = Lazy::new(|| {
361 188 : register_uint_gauge_vec!(
362 188 : "pageserver_page_cache_size_current_bytes",
363 188 : "Current size of the page cache in bytes, by key kind",
364 188 : &["key_kind"]
365 188 : )
366 188 : .expect("failed to define a metric")
367 188 : });
368 :
369 : pub(crate) static PAGE_CACHE_SIZE: Lazy<PageCacheSizeMetrics> =
370 188 : Lazy::new(|| PageCacheSizeMetrics {
371 188 : max_bytes: {
372 188 : register_uint_gauge!(
373 188 : "pageserver_page_cache_size_max_bytes",
374 188 : "Maximum size of the page cache in bytes"
375 188 : )
376 188 : .expect("failed to define a metric")
377 188 : },
378 188 : current_bytes_immutable: {
379 188 : PAGE_CACHE_SIZE_CURRENT_BYTES
380 188 : .get_metric_with_label_values(&["immutable"])
381 188 : .unwrap()
382 188 : },
383 188 : });
384 :
385 : pub(crate) mod page_cache_eviction_metrics {
386 : use std::num::NonZeroUsize;
387 :
388 : use metrics::{IntCounter, IntCounterVec, register_int_counter_vec};
389 : use once_cell::sync::Lazy;
390 :
391 : #[derive(Clone, Copy)]
392 : pub(crate) enum Outcome {
393 : FoundSlotUnused { iters: NonZeroUsize },
394 : FoundSlotEvicted { iters: NonZeroUsize },
395 : ItersExceeded { iters: NonZeroUsize },
396 : }
397 :
398 188 : static ITERS_TOTAL_VEC: Lazy<IntCounterVec> = Lazy::new(|| {
399 188 : register_int_counter_vec!(
400 188 : "pageserver_page_cache_find_victim_iters_total",
401 188 : "Counter for the number of iterations in the find_victim loop",
402 188 : &["outcome"],
403 188 : )
404 188 : .expect("failed to define a metric")
405 188 : });
406 :
407 188 : static CALLS_VEC: Lazy<IntCounterVec> = Lazy::new(|| {
408 188 : register_int_counter_vec!(
409 188 : "pageserver_page_cache_find_victim_calls",
410 188 : "Incremented at the end of each find_victim() call.\
411 188 : Filter by outcome to get e.g., eviction rate.",
412 188 : &["outcome"]
413 188 : )
414 188 : .unwrap()
415 188 : });
416 :
417 63705 : pub(crate) fn observe(outcome: Outcome) {
418 : macro_rules! dry {
419 : ($label:literal, $iters:expr) => {{
420 : static LABEL: &'static str = $label;
421 : static ITERS_TOTAL: Lazy<IntCounter> =
422 228 : Lazy::new(|| ITERS_TOTAL_VEC.with_label_values(&[LABEL]));
423 : static CALLS: Lazy<IntCounter> =
424 228 : Lazy::new(|| CALLS_VEC.with_label_values(&[LABEL]));
425 : ITERS_TOTAL.inc_by(($iters.get()) as u64);
426 : CALLS.inc();
427 : }};
428 : }
429 63705 : match outcome {
430 3288 : Outcome::FoundSlotUnused { iters } => dry!("found_empty", iters),
431 60417 : Outcome::FoundSlotEvicted { iters } => {
432 60417 : dry!("found_evicted", iters)
433 : }
434 0 : Outcome::ItersExceeded { iters } => {
435 0 : dry!("err_iters_exceeded", iters);
436 0 : super::page_cache_errors_inc(super::PageCacheErrorKind::EvictIterLimit);
437 0 : }
438 : }
439 63705 : }
440 : }
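
// Illustrative sketch (hypothetical caller): how the page cache's find_victim
// loop might report its outcome through the module above. The iteration count
// and eviction flag are made-up inputs.
#[allow(dead_code)]
fn report_find_victim_outcome_example(iters: usize, evicted: bool) {
    use page_cache_eviction_metrics::Outcome;
    if let Some(iters) = NonZeroUsize::new(iters) {
        let outcome = if evicted {
            Outcome::FoundSlotEvicted { iters }
        } else {
            Outcome::FoundSlotUnused { iters }
        };
        page_cache_eviction_metrics::observe(outcome);
    }
}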
441 :
442 0 : static PAGE_CACHE_ERRORS: Lazy<IntCounterVec> = Lazy::new(|| {
443 0 : register_int_counter_vec!(
444 0 : "page_cache_errors_total",
445 0 : "Number of timeouts while acquiring a pinned slot in the page cache",
446 0 : &["error_kind"]
447 0 : )
448 0 : .expect("failed to define a metric")
449 0 : });
450 :
451 : #[derive(IntoStaticStr)]
452 : #[strum(serialize_all = "kebab_case")]
453 : pub(crate) enum PageCacheErrorKind {
454 : AcquirePinnedSlotTimeout,
455 : EvictIterLimit,
456 : }
457 :
458 0 : pub(crate) fn page_cache_errors_inc(error_kind: PageCacheErrorKind) {
459 0 : PAGE_CACHE_ERRORS
460 0 : .get_metric_with_label_values(&[error_kind.into()])
461 0 : .unwrap()
462 0 : .inc();
463 0 : }
464 :
465 40 : pub(crate) static WAIT_LSN_TIME: Lazy<Histogram> = Lazy::new(|| {
466 40 : register_histogram!(
467 40 : "pageserver_wait_lsn_seconds",
468 40 : "Time spent waiting for WAL to arrive. Updated on completion of the wait_lsn operation.",
469 40 : CRITICAL_OP_BUCKETS.into(),
470 40 : )
471 40 : .expect("failed to define a metric")
472 40 : });
473 :
474 412 : pub(crate) static WAIT_LSN_START_FINISH_COUNTERPAIR: Lazy<IntCounterPairVec> = Lazy::new(|| {
475 412 : register_int_counter_pair_vec!(
476 412 : "pageserver_wait_lsn_started_count",
477 412 : "Number of wait_lsn operations started.",
478 412 : "pageserver_wait_lsn_finished_count",
479 412 : "Number of wait_lsn operations finished.",
480 412 : &["tenant_id", "shard_id", "timeline_id"],
481 412 : )
482 412 : .expect("failed to define a metric")
483 412 : });
484 :
485 412 : pub(crate) static WAIT_LSN_IN_PROGRESS_MICROS: Lazy<IntCounterVec> = Lazy::new(|| {
486 412 : register_int_counter_vec!(
487 412 : "pageserver_wait_lsn_in_progress_micros",
488 412 : "Time spent waiting for WAL to arrive, by timeline_id. Updated periodically while waiting.",
489 412 : &["tenant_id", "shard_id", "timeline_id"],
490 412 : )
491 412 : .expect("failed to define a metric")
492 412 : });
493 :
494 412 : pub(crate) static WAIT_LSN_IN_PROGRESS_GLOBAL_MICROS: Lazy<IntCounter> = Lazy::new(|| {
495 412 : register_int_counter!(
496 412 : "pageserver_wait_lsn_in_progress_micros_global",
497 412 : "Time spent waiting for WAL to arrive, globally. Updated periodically while waiting."
498 412 : )
499 412 : .expect("failed to define a metric")
500 412 : });
501 :
502 412 : static FLUSH_WAIT_UPLOAD_TIME: Lazy<GaugeVec> = Lazy::new(|| {
503 412 : register_gauge_vec!(
504 412 : "pageserver_flush_wait_upload_seconds",
505 412 : "Time spent waiting for preceding uploads during layer flush",
506 412 : &["tenant_id", "shard_id", "timeline_id"]
507 412 : )
508 412 : .expect("failed to define a metric")
509 412 : });
510 :
511 412 : static LAST_RECORD_LSN: Lazy<IntGaugeVec> = Lazy::new(|| {
512 412 : register_int_gauge_vec!(
513 412 : "pageserver_last_record_lsn",
514 412 : "Last record LSN grouped by timeline",
515 412 : &["tenant_id", "shard_id", "timeline_id"]
516 412 : )
517 412 : .expect("failed to define a metric")
518 412 : });
519 :
520 412 : static DISK_CONSISTENT_LSN: Lazy<IntGaugeVec> = Lazy::new(|| {
521 412 : register_int_gauge_vec!(
522 412 : "pageserver_disk_consistent_lsn",
523 412 : "Disk consistent LSN grouped by timeline",
524 412 : &["tenant_id", "shard_id", "timeline_id"]
525 412 : )
526 412 : .expect("failed to define a metric")
527 412 : });
528 :
529 412 : pub(crate) static PROJECTED_REMOTE_CONSISTENT_LSN: Lazy<UIntGaugeVec> = Lazy::new(|| {
530 412 : register_uint_gauge_vec!(
531 412 : "pageserver_projected_remote_consistent_lsn",
532 412 : "Projected remote consistent LSN grouped by timeline",
533 412 : &["tenant_id", "shard_id", "timeline_id"]
534 412 : )
535 412 : .expect("failed to define a metric")
536 412 : });
537 :
538 412 : static PITR_HISTORY_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
539 412 : register_uint_gauge_vec!(
540 412 : "pageserver_pitr_history_size",
541 412 : "Data written since PITR cutoff on this timeline",
542 412 : &["tenant_id", "shard_id", "timeline_id"]
543 412 : )
544 412 : .expect("failed to define a metric")
545 412 : });
546 :
547 : #[derive(
548 240 : strum_macros::EnumIter,
549 0 : strum_macros::EnumString,
550 : strum_macros::Display,
551 : strum_macros::IntoStaticStr,
552 : )]
553 : #[strum(serialize_all = "kebab_case")]
554 : pub(crate) enum LayerKind {
555 : Delta,
556 : Image,
557 : }
558 :
559 : #[derive(
560 100 : strum_macros::EnumIter,
561 0 : strum_macros::EnumString,
562 : strum_macros::Display,
563 : strum_macros::IntoStaticStr,
564 : )]
565 : #[strum(serialize_all = "kebab_case")]
566 : pub(crate) enum LayerLevel {
567 : // We don't track the currently open ephemeral layer, since there's always exactly 1 and its
568 : // size changes. See `TIMELINE_EPHEMERAL_BYTES`.
569 : Frozen,
570 : L0,
571 : L1,
572 : }
573 :
574 404 : static TIMELINE_LAYER_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
575 404 : register_uint_gauge_vec!(
576 404 : "pageserver_layer_bytes",
577 404 : "Sum of frozen, L0, and L1 layer physical sizes in bytes (excluding the open ephemeral layer)",
578 404 : &["tenant_id", "shard_id", "timeline_id", "level", "kind"]
579 404 : )
580 404 : .expect("failed to define a metric")
581 404 : });
582 :
583 404 : static TIMELINE_LAYER_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
584 404 : register_uint_gauge_vec!(
585 404 : "pageserver_layer_count",
586 404 : "Number of frozen, L0, and L1 layers (excluding the open ephemeral layer)",
587 404 : &["tenant_id", "shard_id", "timeline_id", "level", "kind"]
588 404 : )
589 404 : .expect("failed to define a metric")
590 404 : });
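
// Illustrative sketch (hypothetical label values and figures): setting one
// (level, kind) cell of the two gauge vecs above. The level and kind strings
// come from the strum IntoStaticStr derives above.
#[allow(dead_code)]
fn set_layer_gauges_example(bytes: u64, count: u64) {
    let level: &'static str = LayerLevel::L0.into();
    let kind: &'static str = LayerKind::Delta.into();
    let labels = ["example-tenant", "0001", "example-timeline", level, kind];
    TIMELINE_LAYER_SIZE.with_label_values(&labels).set(bytes);
    TIMELINE_LAYER_COUNT.with_label_values(&labels).set(count);
}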
591 :
592 412 : static TIMELINE_ARCHIVE_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
593 412 : register_uint_gauge_vec!(
594 412 : "pageserver_archive_size",
595 412 : "Timeline's logical size if it is considered eligible for archival (outside PITR window), else zero",
596 412 : &["tenant_id", "shard_id", "timeline_id"]
597 412 : )
598 412 : .expect("failed to define a metric")
599 412 : });
600 :
601 412 : static STANDBY_HORIZON: Lazy<IntGaugeVec> = Lazy::new(|| {
602 412 : register_int_gauge_vec!(
603 412 : "pageserver_standby_horizon",
604 412 : "Standby apply LSN for which GC is hold off, by timeline.",
605 412 : &["tenant_id", "shard_id", "timeline_id"]
606 412 : )
607 412 : .expect("failed to define a metric")
608 412 : });
609 :
610 412 : static RESIDENT_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
611 412 : register_uint_gauge_vec!(
612 412 : "pageserver_resident_physical_size",
613 412 : "The size of the layer files present in the pageserver's filesystem, for attached locations.",
614 412 : &["tenant_id", "shard_id", "timeline_id"]
615 412 : )
616 412 : .expect("failed to define a metric")
617 412 : });
618 :
619 412 : static VISIBLE_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
620 412 : register_uint_gauge_vec!(
621 412 : "pageserver_visible_physical_size",
622 412 : "The size of the layer files present in the pageserver's filesystem.",
623 412 : &["tenant_id", "shard_id", "timeline_id"]
624 412 : )
625 412 : .expect("failed to define a metric")
626 412 : });
627 :
628 404 : pub(crate) static RESIDENT_PHYSICAL_SIZE_GLOBAL: Lazy<UIntGauge> = Lazy::new(|| {
629 404 : register_uint_gauge!(
630 404 : "pageserver_resident_physical_size_global",
631 404 : "Like `pageserver_resident_physical_size`, but without tenant/timeline dimensions."
632 404 : )
633 404 : .expect("failed to define a metric")
634 404 : });
635 :
636 412 : static REMOTE_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
637 412 : register_uint_gauge_vec!(
638 412 : "pageserver_remote_physical_size",
639 412 : "The size of the layer files present in the remote storage that are listed in the remote index_part.json.",
640 412 : // Corollary: If any files are missing from the index part, they won't be included here.
641 412 : &["tenant_id", "shard_id", "timeline_id"]
642 412 : )
643 412 : .expect("failed to define a metric")
644 412 : });
645 :
646 412 : static REMOTE_PHYSICAL_SIZE_GLOBAL: Lazy<UIntGauge> = Lazy::new(|| {
647 412 : register_uint_gauge!(
648 412 : "pageserver_remote_physical_size_global",
649 412 : "Like `pageserver_remote_physical_size`, but without tenant/timeline dimensions."
650 412 : )
651 412 : .expect("failed to define a metric")
652 412 : });
653 :
654 12 : pub(crate) static REMOTE_ONDEMAND_DOWNLOADED_LAYERS: Lazy<IntCounter> = Lazy::new(|| {
655 12 : register_int_counter!(
656 12 : "pageserver_remote_ondemand_downloaded_layers_total",
657 12 : "Total on-demand downloaded layers"
658 12 : )
659 12 : .unwrap()
660 12 : });
661 :
662 12 : pub(crate) static REMOTE_ONDEMAND_DOWNLOADED_BYTES: Lazy<IntCounter> = Lazy::new(|| {
663 12 : register_int_counter!(
664 12 : "pageserver_remote_ondemand_downloaded_bytes_total",
665 12 : "Total bytes of layers on-demand downloaded",
666 12 : )
667 12 : .unwrap()
668 12 : });
669 :
670 412 : static CURRENT_LOGICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
671 412 : register_uint_gauge_vec!(
672 412 : "pageserver_current_logical_size",
673 412 : "Current logical size grouped by timeline",
674 412 : &["tenant_id", "shard_id", "timeline_id"]
675 412 : )
676 412 : .expect("failed to define current logical size metric")
677 412 : });
678 :
679 412 : static AUX_FILE_SIZE: Lazy<IntGaugeVec> = Lazy::new(|| {
680 412 : register_int_gauge_vec!(
681 412 : "pageserver_aux_file_estimated_size",
682 412 : "The size of all aux files for a timeline in aux file v2 store.",
683 412 : &["tenant_id", "shard_id", "timeline_id"]
684 412 : )
685 412 : .expect("failed to define a metric")
686 412 : });
687 :
688 412 : static VALID_LSN_LEASE_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
689 412 : register_uint_gauge_vec!(
690 412 : "pageserver_valid_lsn_lease_count",
691 412 : "The number of valid leases after refreshing gc info.",
692 412 : &["tenant_id", "shard_id", "timeline_id"],
693 412 : )
694 412 : .expect("failed to define a metric")
695 412 : });
696 :
697 0 : pub(crate) static CIRCUIT_BREAKERS_BROKEN: Lazy<IntCounter> = Lazy::new(|| {
698 0 : register_int_counter!(
699 0 : "pageserver_circuit_breaker_broken",
700 0 : "How many times a circuit breaker has broken"
701 0 : )
702 0 : .expect("failed to define a metric")
703 0 : });
704 :
705 0 : pub(crate) static CIRCUIT_BREAKERS_UNBROKEN: Lazy<IntCounter> = Lazy::new(|| {
706 0 : register_int_counter!(
707 0 : "pageserver_circuit_breaker_unbroken",
708 0 : "How many times a circuit breaker has been un-broken (recovered)"
709 0 : )
710 0 : .expect("failed to define a metric")
711 0 : });
712 :
713 396 : pub(crate) static COMPRESSION_IMAGE_INPUT_BYTES: Lazy<IntCounter> = Lazy::new(|| {
714 396 : register_int_counter!(
715 396 : "pageserver_compression_image_in_bytes_total",
716 396 : "Size of data written into image layers before compression"
717 396 : )
718 396 : .expect("failed to define a metric")
719 396 : });
720 :
721 396 : pub(crate) static COMPRESSION_IMAGE_INPUT_BYTES_CONSIDERED: Lazy<IntCounter> = Lazy::new(|| {
722 396 : register_int_counter!(
723 396 : "pageserver_compression_image_in_bytes_considered",
724 396 : "Size of potentially compressible data written into image layers before compression"
725 396 : )
726 396 : .expect("failed to define a metric")
727 396 : });
728 :
729 396 : pub(crate) static COMPRESSION_IMAGE_INPUT_BYTES_CHOSEN: Lazy<IntCounter> = Lazy::new(|| {
730 396 : register_int_counter!(
731 396 : "pageserver_compression_image_in_bytes_chosen",
732 396 : "Size of data whose compressed form was written into image layers"
733 396 : )
734 396 : .expect("failed to define a metric")
735 396 : });
736 :
737 396 : pub(crate) static COMPRESSION_IMAGE_OUTPUT_BYTES: Lazy<IntCounter> = Lazy::new(|| {
738 396 : register_int_counter!(
739 396 : "pageserver_compression_image_out_bytes_total",
740 396 : "Size of compressed image layer written"
741 396 : )
742 396 : .expect("failed to define a metric")
743 396 : });
744 :
745 20 : pub(crate) static RELSIZE_CACHE_ENTRIES: Lazy<UIntGauge> = Lazy::new(|| {
746 20 : register_uint_gauge!(
747 20 : "pageserver_relsize_cache_entries",
748 20 : "Number of entries in the relation size cache",
749 20 : )
750 20 : .expect("failed to define a metric")
751 20 : });
752 :
753 20 : pub(crate) static RELSIZE_CACHE_HITS: Lazy<IntCounter> = Lazy::new(|| {
754 20 : register_int_counter!("pageserver_relsize_cache_hits", "Relation size cache hits",)
755 20 : .expect("failed to define a metric")
756 20 : });
757 :
758 20 : pub(crate) static RELSIZE_CACHE_MISSES: Lazy<IntCounter> = Lazy::new(|| {
759 20 : register_int_counter!(
760 20 : "pageserver_relsize_cache_misses",
761 20 : "Relation size cache misses",
762 20 : )
763 20 : .expect("failed to define a metric")
764 20 : });
765 :
766 8 : pub(crate) static RELSIZE_CACHE_MISSES_OLD: Lazy<IntCounter> = Lazy::new(|| {
767 8 : register_int_counter!(
768 8 : "pageserver_relsize_cache_misses_old",
769 8 : "Relation size cache misses where the lookup LSN is older than the last relation update"
770 8 : )
771 8 : .expect("failed to define a metric")
772 8 : });
773 :
774 : pub(crate) mod initial_logical_size {
775 : use metrics::{IntCounter, IntCounterVec, register_int_counter, register_int_counter_vec};
776 : use once_cell::sync::Lazy;
777 :
778 : pub(crate) struct StartCalculation(IntCounterVec);
779 412 : pub(crate) static START_CALCULATION: Lazy<StartCalculation> = Lazy::new(|| {
780 412 : StartCalculation(
781 412 : register_int_counter_vec!(
782 412 : "pageserver_initial_logical_size_start_calculation",
783 412 : "Incremented each time we start an initial logical size calculation attempt. \
784 412 : The `circumstances` label provides some additional details.",
785 412 : &["attempt", "circumstances"]
786 412 : )
787 412 : .unwrap(),
788 412 : )
789 412 : });
790 :
791 : struct DropCalculation {
792 : first: IntCounter,
793 : retry: IntCounter,
794 : }
795 :
796 412 : static DROP_CALCULATION: Lazy<DropCalculation> = Lazy::new(|| {
797 412 : let vec = register_int_counter_vec!(
798 412 : "pageserver_initial_logical_size_drop_calculation",
799 412 : "Incremented each time we abort a started size calculation attmpt.",
800 412 : &["attempt"]
801 412 : )
802 412 : .unwrap();
803 412 : DropCalculation {
804 412 : first: vec.with_label_values(&["first"]),
805 412 : retry: vec.with_label_values(&["retry"]),
806 412 : }
807 412 : });
808 :
809 : pub(crate) struct Calculated {
810 : pub(crate) births: IntCounter,
811 : pub(crate) deaths: IntCounter,
812 : }
813 :
814 412 : pub(crate) static CALCULATED: Lazy<Calculated> = Lazy::new(|| Calculated {
815 412 : births: register_int_counter!(
816 412 : "pageserver_initial_logical_size_finish_calculation",
817 412 : "Incremented every time we finish calculation of initial logical size.\
818 412 : If everything is working well, this should happen at most once per Timeline object."
819 412 : )
820 412 : .unwrap(),
821 412 : deaths: register_int_counter!(
822 412 : "pageserver_initial_logical_size_drop_finished_calculation",
823 412 : "Incremented when we drop a finished initial logical size calculation result.\
824 412 : Mainly useful to turn pageserver_initial_logical_size_finish_calculation into a gauge."
825 412 : )
826 412 : .unwrap(),
827 412 : });
828 :
829 : pub(crate) struct OngoingCalculationGuard {
830 : inc_drop_calculation: Option<IntCounter>,
831 : }
832 :
833 : #[derive(strum_macros::IntoStaticStr)]
834 : pub(crate) enum StartCircumstances {
835 : EmptyInitial,
836 : SkippedConcurrencyLimiter,
837 : AfterBackgroundTasksRateLimit,
838 : }
839 :
840 : impl StartCalculation {
841 436 : pub(crate) fn first(&self, circumstances: StartCircumstances) -> OngoingCalculationGuard {
842 436 : let circumstances_label: &'static str = circumstances.into();
843 436 : self.0
844 436 : .with_label_values(&["first", circumstances_label])
845 436 : .inc();
846 436 : OngoingCalculationGuard {
847 436 : inc_drop_calculation: Some(DROP_CALCULATION.first.clone()),
848 436 : }
849 436 : }
850 0 : pub(crate) fn retry(&self, circumstances: StartCircumstances) -> OngoingCalculationGuard {
851 0 : let circumstances_label: &'static str = circumstances.into();
852 0 : self.0
853 0 : .with_label_values(&["retry", circumstances_label])
854 0 : .inc();
855 0 : OngoingCalculationGuard {
856 0 : inc_drop_calculation: Some(DROP_CALCULATION.retry.clone()),
857 0 : }
858 0 : }
859 : }
860 :
861 : impl Drop for OngoingCalculationGuard {
862 436 : fn drop(&mut self) {
863 436 : if let Some(counter) = self.inc_drop_calculation.take() {
864 0 : counter.inc();
865 436 : }
866 436 : }
867 : }
868 :
869 : impl OngoingCalculationGuard {
870 436 : pub(crate) fn calculation_result_saved(mut self) -> FinishedCalculationGuard {
871 436 : drop(self.inc_drop_calculation.take());
872 436 : CALCULATED.births.inc();
873 436 : FinishedCalculationGuard {
874 436 : inc_on_drop: CALCULATED.deaths.clone(),
875 436 : }
876 436 : }
877 : }
878 :
879 : pub(crate) struct FinishedCalculationGuard {
880 : inc_on_drop: IntCounter,
881 : }
882 :
883 : impl Drop for FinishedCalculationGuard {
884 12 : fn drop(&mut self) {
885 12 : self.inc_on_drop.inc();
886 12 : }
887 : }
888 :
889 : // context: https://github.com/neondatabase/neon/issues/5963
890 : pub(crate) static TIMELINES_WHERE_WALRECEIVER_GOT_APPROXIMATE_SIZE: Lazy<IntCounter> =
891 0 : Lazy::new(|| {
892 0 : register_int_counter!(
893 0 : "pageserver_initial_logical_size_timelines_where_walreceiver_got_approximate_size",
894 0 : "Counter for the following event: walreceiver calls\
895 0 : Timeline::get_current_logical_size() and it returns `Approximate` for the first time."
896 0 : )
897 0 : .unwrap()
898 0 : });
899 : }
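
// Illustrative sketch (hypothetical caller): the intended use of the guard pair
// in the module above. Dropping the `OngoingCalculationGuard` without calling
// `calculation_result_saved` counts as a dropped calculation; the returned
// `FinishedCalculationGuard` is meant to live as long as the cached result.
#[allow(dead_code)]
fn initial_logical_size_metrics_example() -> initial_logical_size::FinishedCalculationGuard {
    let guard = initial_logical_size::START_CALCULATION
        .first(initial_logical_size::StartCircumstances::SkippedConcurrencyLimiter);
    // ... compute the initial logical size (elided) ...
    guard.calculation_result_saved()
}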
900 :
901 0 : static DIRECTORY_ENTRIES_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
902 0 : register_uint_gauge_vec!(
903 0 : "pageserver_directory_entries_count",
904 0 : "Sum of the entries in pageserver-stored directory listings",
905 0 : &["tenant_id", "shard_id", "timeline_id"]
906 0 : )
907 0 : .expect("failed to define a metric")
908 0 : });
909 :
910 416 : pub(crate) static TENANT_STATE_METRIC: Lazy<UIntGaugeVec> = Lazy::new(|| {
911 416 : register_uint_gauge_vec!(
912 416 : "pageserver_tenant_states_count",
913 416 : "Count of tenants per state",
914 416 : &["state"]
915 416 : )
916 416 : .expect("Failed to register pageserver_tenant_states_count metric")
917 416 : });
918 :
919 : /// A set of broken tenants.
920 : ///
921 : /// These are expected to be so rare that a set is fine. Set as in a new timeseries per each broken
922 : /// tenant.
923 20 : pub(crate) static BROKEN_TENANTS_SET: Lazy<UIntGaugeVec> = Lazy::new(|| {
924 20 : register_uint_gauge_vec!(
925 20 : "pageserver_broken_tenants_count",
926 20 : "Set of broken tenants",
927 20 : &["tenant_id", "shard_id"]
928 20 : )
929 20 : .expect("Failed to register pageserver_tenant_states_count metric")
930 20 : });
931 :
932 12 : pub(crate) static TENANT_SYNTHETIC_SIZE_METRIC: Lazy<UIntGaugeVec> = Lazy::new(|| {
933 12 : register_uint_gauge_vec!(
934 12 : "pageserver_tenant_synthetic_cached_size_bytes",
935 12 : "Synthetic size of each tenant in bytes",
936 12 : &["tenant_id"]
937 12 : )
938 12 : .expect("Failed to register pageserver_tenant_synthetic_cached_size_bytes metric")
939 12 : });
940 :
941 0 : pub(crate) static EVICTION_ITERATION_DURATION: Lazy<HistogramVec> = Lazy::new(|| {
942 0 : register_histogram_vec!(
943 0 : "pageserver_eviction_iteration_duration_seconds_global",
944 0 : "Time spent on a single eviction iteration",
945 0 : &["period_secs", "threshold_secs"],
946 0 : STORAGE_OP_BUCKETS.into(),
947 0 : )
948 0 : .expect("failed to define a metric")
949 0 : });
950 :
951 412 : static EVICTIONS: Lazy<IntCounterVec> = Lazy::new(|| {
952 412 : register_int_counter_vec!(
953 412 : "pageserver_evictions",
954 412 : "Number of layers evicted from the pageserver",
955 412 : &["tenant_id", "shard_id", "timeline_id"]
956 412 : )
957 412 : .expect("failed to define a metric")
958 412 : });
959 :
960 412 : static EVICTIONS_WITH_LOW_RESIDENCE_DURATION: Lazy<IntCounterVec> = Lazy::new(|| {
961 412 : register_int_counter_vec!(
962 412 : "pageserver_evictions_with_low_residence_duration",
963 412 : "If a layer is evicted that was resident for less than `low_threshold`, it is counted to this counter. \
964 412 : Residence duration is determined using the `residence_duration_data_source`.",
965 412 : &["tenant_id", "shard_id", "timeline_id", "residence_duration_data_source", "low_threshold_secs"]
966 412 : )
967 412 : .expect("failed to define a metric")
968 412 : });
969 :
970 0 : pub(crate) static UNEXPECTED_ONDEMAND_DOWNLOADS: Lazy<IntCounter> = Lazy::new(|| {
971 0 : register_int_counter!(
972 0 : "pageserver_unexpected_ondemand_downloads_count",
973 0 : "Number of unexpected on-demand downloads. \
974 0 : We log more context for each increment, so, forgo any labels in this metric.",
975 0 : )
976 0 : .expect("failed to define a metric")
977 0 : });
978 :
979 : /// How long did we take to start up? Broken down by labels to describe
980 : /// different phases of startup.
981 0 : pub static STARTUP_DURATION: Lazy<GaugeVec> = Lazy::new(|| {
982 0 : register_gauge_vec!(
983 0 : "pageserver_startup_duration_seconds",
984 0 : "Time taken by phases of pageserver startup, in seconds",
985 0 : &["phase"]
986 0 : )
987 0 : .expect("Failed to register pageserver_startup_duration_seconds metric")
988 0 : });
989 :
990 0 : pub static STARTUP_IS_LOADING: Lazy<UIntGauge> = Lazy::new(|| {
991 0 : register_uint_gauge!(
992 0 : "pageserver_startup_is_loading",
993 0 : "1 while in initial startup load of tenants, 0 at other times"
994 0 : )
995 0 : .expect("Failed to register pageserver_startup_is_loading")
996 0 : });
997 :
998 404 : pub(crate) static TIMELINE_EPHEMERAL_BYTES: Lazy<UIntGauge> = Lazy::new(|| {
999 404 : register_uint_gauge!(
1000 404 : "pageserver_timeline_ephemeral_bytes",
1001 404 : "Total number of bytes in ephemeral layers, summed for all timelines. Approximate, lazily updated."
1002 404 : )
1003 404 : .expect("Failed to register metric")
1004 404 : });
1005 :
1006 : /// Metrics related to the lifecycle of a [`crate::tenant::Tenant`] object: things
1007 : /// like how long it took to load.
1008 : ///
1009 : /// Note that these are process-global metrics, _not_ per-tenant metrics. Per-tenant
1010 : /// metrics are rather expensive, and usually fine grained stuff makes more sense
1011 : /// at a timeline level than tenant level.
1012 : pub(crate) struct TenantMetrics {
1013 : /// How long did tenants take to go from construction to active state?
1014 : pub(crate) activation: Histogram,
1015 : pub(crate) preload: Histogram,
1016 : pub(crate) attach: Histogram,
1017 :
1018 : /// How many tenants are included in the initial startup of the pageserver?
1019 : pub(crate) startup_scheduled: IntCounter,
1020 : pub(crate) startup_complete: IntCounter,
1021 : }
1022 :
1023 0 : pub(crate) static TENANT: Lazy<TenantMetrics> = Lazy::new(|| {
1024 0 : TenantMetrics {
1025 0 : activation: register_histogram!(
1026 0 : "pageserver_tenant_activation_seconds",
1027 0 : "Time taken by tenants to activate, in seconds",
1028 0 : CRITICAL_OP_BUCKETS.into()
1029 0 : )
1030 0 : .expect("Failed to register metric"),
1031 0 : preload: register_histogram!(
1032 0 : "pageserver_tenant_preload_seconds",
1033 0 : "Time taken by tenants to load remote metadata on startup/attach, in seconds",
1034 0 : CRITICAL_OP_BUCKETS.into()
1035 0 : )
1036 0 : .expect("Failed to register metric"),
1037 0 : attach: register_histogram!(
1038 0 : "pageserver_tenant_attach_seconds",
1039 0 : "Time taken by tenants to intialize, after remote metadata is already loaded",
1040 0 : CRITICAL_OP_BUCKETS.into()
1041 0 : )
1042 0 : .expect("Failed to register metric"),
1043 0 : startup_scheduled: register_int_counter!(
1044 0 : "pageserver_tenant_startup_scheduled",
1045 0 : "Number of tenants included in pageserver startup (doesn't count tenants attached later)"
1046 0 : ).expect("Failed to register metric"),
1047 0 : startup_complete: register_int_counter!(
1048 0 : "pageserver_tenant_startup_complete",
1049 0 : "Number of tenants that have completed warm-up, or activated on-demand during initial startup: \
1050 0 : should eventually reach `pageserver_tenant_startup_scheduled_total`. Does not include broken \
1051 0 : tenants: such cases will lead to this metric never reaching the scheduled count."
1052 0 : ).expect("Failed to register metric"),
1053 0 : }
1054 0 : });
1055 :
1056 : /// Each `Timeline`'s [`EVICTIONS_WITH_LOW_RESIDENCE_DURATION`] metric.
1057 : #[derive(Debug)]
1058 : pub(crate) struct EvictionsWithLowResidenceDuration {
1059 : data_source: &'static str,
1060 : threshold: Duration,
1061 : counter: Option<IntCounter>,
1062 : }
1063 :
1064 : pub(crate) struct EvictionsWithLowResidenceDurationBuilder {
1065 : data_source: &'static str,
1066 : threshold: Duration,
1067 : }
1068 :
1069 : impl EvictionsWithLowResidenceDurationBuilder {
1070 904 : pub fn new(data_source: &'static str, threshold: Duration) -> Self {
1071 904 : Self {
1072 904 : data_source,
1073 904 : threshold,
1074 904 : }
1075 904 : }
1076 :
1077 904 : fn build(
1078 904 : &self,
1079 904 : tenant_id: &str,
1080 904 : shard_id: &str,
1081 904 : timeline_id: &str,
1082 904 : ) -> EvictionsWithLowResidenceDuration {
1083 904 : let counter = EVICTIONS_WITH_LOW_RESIDENCE_DURATION
1084 904 : .get_metric_with_label_values(&[
1085 904 : tenant_id,
1086 904 : shard_id,
1087 904 : timeline_id,
1088 904 : self.data_source,
1089 904 : &EvictionsWithLowResidenceDuration::threshold_label_value(self.threshold),
1090 904 : ])
1091 904 : .unwrap();
1092 904 : EvictionsWithLowResidenceDuration {
1093 904 : data_source: self.data_source,
1094 904 : threshold: self.threshold,
1095 904 : counter: Some(counter),
1096 904 : }
1097 904 : }
1098 : }
1099 :
1100 : impl EvictionsWithLowResidenceDuration {
1101 924 : fn threshold_label_value(threshold: Duration) -> String {
1102 924 : format!("{}", threshold.as_secs())
1103 924 : }
1104 :
1105 8 : pub fn observe(&self, observed_value: Duration) {
1106 8 : if observed_value < self.threshold {
1107 8 : self.counter
1108 8 : .as_ref()
1109 8 : .expect("nobody calls this function after `remove_from_vec`")
1110 8 : .inc();
1111 8 : }
1112 8 : }
1113 :
1114 0 : pub fn change_threshold(
1115 0 : &mut self,
1116 0 : tenant_id: &str,
1117 0 : shard_id: &str,
1118 0 : timeline_id: &str,
1119 0 : new_threshold: Duration,
1120 0 : ) {
1121 0 : if new_threshold == self.threshold {
1122 0 : return;
1123 0 : }
1124 0 : let mut with_new = EvictionsWithLowResidenceDurationBuilder::new(
1125 0 : self.data_source,
1126 0 : new_threshold,
1127 0 : )
1128 0 : .build(tenant_id, shard_id, timeline_id);
1129 0 : std::mem::swap(self, &mut with_new);
1130 0 : with_new.remove(tenant_id, shard_id, timeline_id);
1131 0 : }
1132 :
1133 : // This could be a `Drop` impl, but we need the `tenant_id` and `timeline_id`.
1134 20 : fn remove(&mut self, tenant_id: &str, shard_id: &str, timeline_id: &str) {
1135 20 : let Some(_counter) = self.counter.take() else {
1136 0 : return;
1137 : };
1138 :
1139 20 : let threshold = Self::threshold_label_value(self.threshold);
1140 20 :
1141 20 : let removed = EVICTIONS_WITH_LOW_RESIDENCE_DURATION.remove_label_values(&[
1142 20 : tenant_id,
1143 20 : shard_id,
1144 20 : timeline_id,
1145 20 : self.data_source,
1146 20 : &threshold,
1147 20 : ]);
1148 20 :
1149 20 : match removed {
1150 0 : Err(e) => {
1151 0 : // this has been hit in staging as
1152 0 : // <https://neondatabase.sentry.io/issues/4142396994/>, but we don't know how.
1153 0 : // because we can be in the drop path already, don't risk:
1154 0 : // - "double-panic => illegal instruction" or
1155 0 : // - future "drop panick => abort"
1156 0 : //
1157 0 : // so just nag: (the error has the labels)
1158 0 : tracing::warn!(
1159 0 : "failed to remove EvictionsWithLowResidenceDuration, it was already removed? {e:#?}"
1160 : );
1161 : }
1162 : Ok(()) => {
1163 : // to help identify cases where we double-remove the same values, let's log all
1164 : // deletions?
1165 20 : tracing::info!(
1166 0 : "removed EvictionsWithLowResidenceDuration with {tenant_id}, {timeline_id}, {}, {threshold}",
1167 : self.data_source
1168 : );
1169 : }
1170 : }
1171 20 : }
1172 : }
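
// Illustrative sketch (hypothetical threshold and identifiers): building the
// per-timeline counter above and feeding it an observed residence duration.
#[allow(dead_code)]
fn evictions_low_residence_example() {
    let builder = EvictionsWithLowResidenceDurationBuilder::new(
        "example-data-source",
        Duration::from_secs(600),
    );
    let metric = builder.build("example-tenant", "0001", "example-timeline");
    // A layer resident for less than the 600s threshold bumps the counter.
    metric.observe(Duration::from_secs(30));
}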
1173 :
1174 : // Metrics collected on disk IO operations
1175 : //
1176 : // Roughly logarithmic scale.
1177 : const STORAGE_IO_TIME_BUCKETS: &[f64] = &[
1178 : 0.000030, // 30 usec
1179 : 0.001000, // 1000 usec
1180 : 0.030, // 30 ms
1181 : 1.000, // 1000 ms
1182 : 30.000, // 30000 ms
1183 : ];
1184 :
1185 : /// VirtualFile fs operation variants.
1186 : ///
1187 : /// Operations:
1188 : /// - open ([`std::fs::OpenOptions::open`])
1189 : /// - close (dropping [`crate::virtual_file::VirtualFile`])
1190 : /// - close-by-replace (close by replacement algorithm)
1191 : /// - read (`read_at`)
1192 : /// - write (`write_at`)
1193 : /// - seek (modify internal position or file length query)
1194 : /// - fsync ([`std::fs::File::sync_all`])
1195 : /// - metadata ([`std::fs::File::metadata`])
1196 : #[derive(
1197 0 : Debug, Clone, Copy, strum_macros::EnumCount, strum_macros::EnumIter, strum_macros::FromRepr,
1198 : )]
1199 : pub(crate) enum StorageIoOperation {
1200 : Open,
1201 : OpenAfterReplace,
1202 : Close,
1203 : CloseByReplace,
1204 : Read,
1205 : Write,
1206 : Seek,
1207 : Fsync,
1208 : Metadata,
1209 : }
1210 :
1211 : impl StorageIoOperation {
1212 4248 : pub fn as_str(&self) -> &'static str {
1213 4248 : match self {
1214 472 : StorageIoOperation::Open => "open",
1215 472 : StorageIoOperation::OpenAfterReplace => "open-after-replace",
1216 472 : StorageIoOperation::Close => "close",
1217 472 : StorageIoOperation::CloseByReplace => "close-by-replace",
1218 472 : StorageIoOperation::Read => "read",
1219 472 : StorageIoOperation::Write => "write",
1220 472 : StorageIoOperation::Seek => "seek",
1221 472 : StorageIoOperation::Fsync => "fsync",
1222 472 : StorageIoOperation::Metadata => "metadata",
1223 : }
1224 4248 : }
1225 : }
1226 :
1227 : /// Tracks time taken by fs operations near VirtualFile.
1228 : #[derive(Debug)]
1229 : pub(crate) struct StorageIoTime {
1230 : metrics: [Histogram; StorageIoOperation::COUNT],
1231 : }
1232 :
1233 : impl StorageIoTime {
1234 472 : fn new() -> Self {
1235 472 : let storage_io_histogram_vec = register_histogram_vec!(
1236 472 : "pageserver_io_operations_seconds",
1237 472 : "Time spent in IO operations",
1238 472 : &["operation"],
1239 472 : STORAGE_IO_TIME_BUCKETS.into()
1240 472 : )
1241 472 : .expect("failed to define a metric");
1242 4248 : let metrics = std::array::from_fn(|i| {
1243 4248 : let op = StorageIoOperation::from_repr(i).unwrap();
1244 4248 : storage_io_histogram_vec
1245 4248 : .get_metric_with_label_values(&[op.as_str()])
1246 4248 : .unwrap()
1247 4248 : });
1248 472 : Self { metrics }
1249 472 : }
1250 :
1251 4024865 : pub(crate) fn get(&self, op: StorageIoOperation) -> &Histogram {
1252 4024865 : &self.metrics[op as usize]
1253 4024865 : }
1254 : }
1255 :
1256 : pub(crate) static STORAGE_IO_TIME_METRIC: Lazy<StorageIoTime> = Lazy::new(StorageIoTime::new);
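
// Illustrative sketch (hypothetical wrapper): timing a single operation against
// the histogram set above. The measured closure is a stand-in for the real
// VirtualFile call.
#[allow(dead_code)]
fn time_storage_io_example<T>(op: StorageIoOperation, f: impl FnOnce() -> T) -> T {
    let start = Instant::now();
    let res = f();
    STORAGE_IO_TIME_METRIC
        .get(op)
        .observe(start.elapsed().as_secs_f64());
    res
}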
1257 :
1258 : #[derive(Clone, Copy)]
1259 : #[repr(usize)]
1260 : enum StorageIoSizeOperation {
1261 : Read,
1262 : Write,
1263 : }
1264 :
1265 : impl StorageIoSizeOperation {
1266 : const VARIANTS: &'static [&'static str] = &["read", "write"];
1267 :
1268 2904 : fn as_str(&self) -> &'static str {
1269 2904 : Self::VARIANTS[*self as usize]
1270 2904 : }
1271 : }
1272 :
1273 : // Needed for the https://neonprod.grafana.net/d/5uK9tHL4k/picking-tenant-for-relocation?orgId=1
1274 548 : static STORAGE_IO_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
1275 548 : register_uint_gauge_vec!(
1276 548 : "pageserver_io_operations_bytes_total",
1277 548 : "Total amount of bytes read/written in IO operations",
1278 548 : &["operation", "tenant_id", "shard_id", "timeline_id"]
1279 548 : )
1280 548 : .expect("failed to define a metric")
1281 548 : });
1282 :
1283 : #[derive(Clone, Debug)]
1284 : pub(crate) struct StorageIoSizeMetrics {
1285 : pub read: UIntGauge,
1286 : pub write: UIntGauge,
1287 : }
1288 :
1289 : impl StorageIoSizeMetrics {
1290 1452 : pub(crate) fn new(tenant_id: &str, shard_id: &str, timeline_id: &str) -> Self {
1291 1452 : let read = STORAGE_IO_SIZE
1292 1452 : .get_metric_with_label_values(&[
1293 1452 : StorageIoSizeOperation::Read.as_str(),
1294 1452 : tenant_id,
1295 1452 : shard_id,
1296 1452 : timeline_id,
1297 1452 : ])
1298 1452 : .unwrap();
1299 1452 : let write = STORAGE_IO_SIZE
1300 1452 : .get_metric_with_label_values(&[
1301 1452 : StorageIoSizeOperation::Write.as_str(),
1302 1452 : tenant_id,
1303 1452 : shard_id,
1304 1452 : timeline_id,
1305 1452 : ])
1306 1452 : .unwrap();
1307 1452 : Self { read, write }
1308 1452 : }
1309 : }
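
// Illustrative sketch (hypothetical label values and byte counts): per-timeline
// read/write byte accounting with the struct above.
#[allow(dead_code)]
fn storage_io_size_example() {
    let metrics = StorageIoSizeMetrics::new("example-tenant", "0001", "example-timeline");
    metrics.read.add(8192);
    metrics.write.add(4096);
}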
1310 :
1311 : #[cfg(not(test))]
1312 : pub(crate) mod virtual_file_descriptor_cache {
1313 : use super::*;
1314 :
1315 0 : pub(crate) static SIZE_MAX: Lazy<UIntGauge> = Lazy::new(|| {
1316 0 : register_uint_gauge!(
1317 0 : "pageserver_virtual_file_descriptor_cache_size_max",
1318 0 : "Maximum number of open file descriptors in the cache."
1319 0 : )
1320 0 : .unwrap()
1321 0 : });
1322 :
1323 : // SIZE_CURRENT: derive it like so:
1324 : // ```
1325 : // sum (pageserver_io_operations_seconds_count{operation=~"^(open|open-after-replace)$")
1326 : // -ignoring(operation)
1327 : // sum(pageserver_io_operations_seconds_count{operation=~"^(close|close-by-replace)$"}
1328 : // ```
1329 : }
1330 :
1331 : #[cfg(not(test))]
1332 : pub(crate) mod virtual_file_io_engine {
1333 : use super::*;
1334 :
1335 0 : pub(crate) static KIND: Lazy<UIntGaugeVec> = Lazy::new(|| {
1336 0 : register_uint_gauge_vec!(
1337 0 : "pageserver_virtual_file_io_engine_kind",
1338 0 : "The configured io engine for VirtualFile",
1339 0 : &["kind"],
1340 0 : )
1341 0 : .unwrap()
1342 0 : });
1343 : }
1344 :
1345 : pub(crate) struct SmgrOpTimer(Option<SmgrOpTimerInner>);
1346 : pub(crate) struct SmgrOpTimerInner {
1347 : global_execution_latency_histo: Histogram,
1348 : per_timeline_execution_latency_histo: Option<Histogram>,
1349 :
1350 : global_batch_wait_time: Histogram,
1351 : per_timeline_batch_wait_time: Histogram,
1352 :
1353 : global_flush_in_progress_micros: IntCounter,
1354 : per_timeline_flush_in_progress_micros: IntCounter,
1355 :
1356 : throttling: Arc<tenant_throttling::Pagestream>,
1357 :
1358 : timings: SmgrOpTimerState,
1359 : }
1360 :
1361 : /// The stages of request processing are represented by the enum variants.
1362 : /// Used as part of [`SmgrOpTimerInner::timings`].
1363 : ///
1364 : /// Request processing calls into the `SmgrOpTimer::observe_*` methods at the
1365 : /// transition points.
1366 : /// These methods bump relevant counters and then update [`SmgrOpTimerInner::timings`]
1367 : /// to the next state.
1368 : ///
1369 : /// Each request goes through every stage, in all configurations.
1370 : ///
1371 : #[derive(Debug)]
1372 : enum SmgrOpTimerState {
1373 : Received {
1374 : // In the future, we may want to track the full time the request spent
1375 : // inside pageserver process (time spent in kernel buffers can't be tracked).
1376 : // `received_at` would be used for that.
1377 : #[allow(dead_code)]
1378 : received_at: Instant,
1379 : },
1380 : Throttling {
1381 : throttle_started_at: Instant,
1382 : },
1383 : Batching {
1384 : throttle_done_at: Instant,
1385 : },
1386 : Executing {
1387 : execution_started_at: Instant,
1388 : },
1389 : Flushing,
1390 : // NB: when adding observation points, remember to update the Drop impl.
1391 : }
1392 :
1393 : // NB: when adding observation points, remember to update the Drop impl.
1394 : impl SmgrOpTimer {
1395 : /// See [`SmgrOpTimerState`] for more context.
1396 0 : pub(crate) fn observe_throttle_start(&mut self, at: Instant) {
1397 0 : let Some(inner) = self.0.as_mut() else {
1398 0 : return;
1399 : };
1400 0 : let SmgrOpTimerState::Received { received_at: _ } = &mut inner.timings else {
1401 0 : return;
1402 : };
1403 0 : inner.throttling.count_accounted_start.inc();
1404 0 : inner.timings = SmgrOpTimerState::Throttling {
1405 0 : throttle_started_at: at,
1406 0 : };
1407 0 : }
1408 :
1409 : /// See [`SmgrOpTimerState`] for more context.
1410 0 : pub(crate) fn observe_throttle_done(&mut self, throttle: ThrottleResult) {
1411 0 : let Some(inner) = self.0.as_mut() else {
1412 0 : return;
1413 : };
1414 : let SmgrOpTimerState::Throttling {
1415 0 : throttle_started_at,
1416 0 : } = &inner.timings
1417 : else {
1418 0 : return;
1419 : };
1420 0 : inner.throttling.count_accounted_finish.inc();
1421 0 : match throttle {
1422 0 : ThrottleResult::NotThrottled { end } => {
1423 0 : inner.timings = SmgrOpTimerState::Batching {
1424 0 : throttle_done_at: end,
1425 0 : };
1426 0 : }
1427 0 : ThrottleResult::Throttled { end } => {
1428 0 : // update metrics
1429 0 : inner.throttling.count_throttled.inc();
1430 0 : inner
1431 0 : .throttling
1432 0 : .wait_time
1433 0 : .inc_by((end - *throttle_started_at).as_micros().try_into().unwrap());
1434 0 : // state transition
1435 0 : inner.timings = SmgrOpTimerState::Batching {
1436 0 : throttle_done_at: end,
1437 0 : };
1438 0 : }
1439 : }
1440 0 : }
1441 :
1442 : /// See [`SmgrOpTimerState`] for more context.
1443 0 : pub(crate) fn observe_execution_start(&mut self, at: Instant) {
1444 0 : let Some(inner) = self.0.as_mut() else {
1445 0 : return;
1446 : };
1447 0 : let SmgrOpTimerState::Batching { throttle_done_at } = &inner.timings else {
1448 0 : return;
1449 : };
1450 : // update metrics
1451 0 : let batch = at - *throttle_done_at;
1452 0 : inner.global_batch_wait_time.observe(batch.as_secs_f64());
1453 0 : inner
1454 0 : .per_timeline_batch_wait_time
1455 0 : .observe(batch.as_secs_f64());
1456 0 : // state transition
1457 0 : inner.timings = SmgrOpTimerState::Executing {
1458 0 : execution_started_at: at,
1459 0 : }
1460 0 : }
1461 :
1462 : /// For all but the first caller, this is a no-op.
1463 : /// The first caller receives Some, subsequent ones None.
1464 : ///
1465 : /// See [`SmgrOpTimerState`] for more context.
1466 0 : pub(crate) fn observe_execution_end(&mut self, at: Instant) -> Option<SmgrOpFlushInProgress> {
1467 : // NB: unlike the other observe_* methods, this one take()s.
1468 : #[allow(clippy::question_mark)] // maintain similar code pattern.
1469 0 : let Some(mut inner) = self.0.take() else {
1470 0 : return None;
1471 : };
1472 : let SmgrOpTimerState::Executing {
1473 0 : execution_started_at,
1474 0 : } = &inner.timings
1475 : else {
1476 0 : return None;
1477 : };
1478 : // update metrics
1479 0 : let execution = at - *execution_started_at;
1480 0 : inner
1481 0 : .global_execution_latency_histo
1482 0 : .observe(execution.as_secs_f64());
1483 0 : if let Some(per_timeline_execution_latency_histo) =
1484 0 : &inner.per_timeline_execution_latency_histo
1485 0 : {
1486 0 : per_timeline_execution_latency_histo.observe(execution.as_secs_f64());
1487 0 : }
1488 :
1489 : // state transition
1490 0 : inner.timings = SmgrOpTimerState::Flushing;
1491 0 :
1492 0 : // return the flush in progress object which
1493 0 : // will do the remaining metrics updates
1494 0 : let SmgrOpTimerInner {
1495 0 : global_flush_in_progress_micros,
1496 0 : per_timeline_flush_in_progress_micros,
1497 0 : ..
1498 0 : } = inner;
1499 0 : Some(SmgrOpFlushInProgress {
1500 0 : global_micros: global_flush_in_progress_micros,
1501 0 : per_timeline_micros: per_timeline_flush_in_progress_micros,
1502 0 : })
1503 0 : }
1504 : }
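
// Illustrative sketch (hypothetical request path): the order in which the
// observation points above are expected to be called, matching the state
// transitions in `SmgrOpTimerState`. How the timer is obtained and whether the
// request was actually throttled are out of scope here.
#[allow(dead_code)]
fn smgr_op_timer_lifecycle_example(mut timer: SmgrOpTimer) -> Option<SmgrOpFlushInProgress> {
    timer.observe_throttle_start(Instant::now());
    // ... wait for the throttle ...
    timer.observe_throttle_done(ThrottleResult::NotThrottled {
        end: Instant::now(),
    });
    // ... the request waits in a batch until an executor picks it up ...
    timer.observe_execution_start(Instant::now());
    // ... handle the request ...
    timer.observe_execution_end(Instant::now())
}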
1505 :
1506 : /// The last stage of request processing is serializing and flushing the request
1507 : /// into the TCP connection. We want to make slow flushes observable
1508 : /// _while they are occurring_, so this struct provides a wrapper method [`Self::measure`]
1509 : /// to periodically bump the metric.
1510 : ///
1511 : /// If in the future we decide that we're not interested in live updates, we can
1512 : /// add another `observe_*` method to [`SmgrOpTimer`], follow the existing pattern there,
1513 : /// and remove this struct from the code base.
1514 : pub(crate) struct SmgrOpFlushInProgress {
1515 : global_micros: IntCounter,
1516 : per_timeline_micros: IntCounter,
1517 : }
1518 :
1519 : impl Drop for SmgrOpTimer {
1520 0 : fn drop(&mut self) {
1521 0 : // In case of early drop, update any of the remaining metrics with
1522 0 : // observations so that (started,finished) counter pairs balance out
1523 0 : // and all counters on the latency path have the same number of
1524 0 : // observations.
1525 0 : // It's technically lying and it would be better if each metric had
1526 0 : // a separate label or similar for cancelled requests.
1527 0 : // But we don't have that right now and counter pairs balancing
1528 0 : // out is useful when using the metrics in panels and whatnot.
1529 0 : let now = Instant::now();
1530 0 : self.observe_throttle_start(now);
1531 0 : self.observe_throttle_done(ThrottleResult::NotThrottled { end: now });
1532 0 : self.observe_execution_start(now);
1533 0 : let maybe_flush_timer = self.observe_execution_end(now);
1534 0 : drop(maybe_flush_timer);
1535 0 : }
1536 : }
1537 :
1538 : impl SmgrOpFlushInProgress {
1539 : /// The caller must guarantee that `socket_fd` outlives this function.
1540 0 : pub(crate) async fn measure<Fut, O>(
1541 0 : self,
1542 0 : started_at: Instant,
1543 0 : mut fut: Fut,
1544 0 : socket_fd: RawFd,
1545 0 : ) -> O
1546 0 : where
1547 0 : Fut: std::future::Future<Output = O>,
1548 0 : {
1549 0 : let mut fut = std::pin::pin!(fut);
1550 0 :
1551 0 : let mut logged = false;
1552 0 : let mut last_counter_increment_at = started_at;
1553 0 : let mut observe_guard = scopeguard::guard(
1554 0 : |is_timeout| {
1555 0 : let now = Instant::now();
1556 0 :
1557 0 : // Increment counter
1558 0 : {
1559 0 : let elapsed_since_last_observe = now - last_counter_increment_at;
1560 0 : self.global_micros
1561 0 : .inc_by(u64::try_from(elapsed_since_last_observe.as_micros()).unwrap());
1562 0 : self.per_timeline_micros
1563 0 : .inc_by(u64::try_from(elapsed_since_last_observe.as_micros()).unwrap());
1564 0 : last_counter_increment_at = now;
1565 0 : }
1566 0 :
1567 0 : // Log something on every timeout, and on completion but only if we hit a timeout.
1568 0 : if is_timeout || logged {
1569 0 : logged = true;
1570 0 : let elapsed_total = now - started_at;
1571 0 : let msg = if is_timeout {
1572 0 : "slow flush ongoing"
1573 : } else {
1574 0 : "slow flush completed or cancelled"
1575 : };
1576 :
1577 0 : let (inq, outq) = {
1578 0 : // SAFETY: caller guarantees that `socket_fd` outlives this function.
1579 0 : #[cfg(target_os = "linux")]
1580 0 : unsafe {
1581 0 : (
1582 0 : utils::linux_socket_ioctl::inq(socket_fd).unwrap_or(-2),
1583 0 : utils::linux_socket_ioctl::outq(socket_fd).unwrap_or(-2),
1584 0 : )
1585 0 : }
1586 0 : #[cfg(not(target_os = "linux"))]
1587 0 : {
1588 0 : _ = socket_fd; // appease unused lint on macOS
1589 0 : (-1, -1)
1590 0 : }
1591 0 : };
1592 0 :
1593 0 : let elapsed_total_secs = format!("{:.6}", elapsed_total.as_secs_f64());
1594 0 : tracing::info!(elapsed_total_secs, inq, outq, msg);
1595 0 : }
1596 0 : },
1597 0 : |mut observe| {
1598 0 : observe(false);
1599 0 : },
1600 0 : );
1601 :
1602 : loop {
1603 0 : match tokio::time::timeout(Duration::from_secs(10), &mut fut).await {
1604 0 : Ok(v) => return v,
1605 0 : Err(_timeout) => {
1606 0 : (*observe_guard)(true);
1607 0 : }
1608 : }
1609 : }
1610 0 : }
1611 : }
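 :
 : // Editorial sketch (not part of the original source): how a caller might drive `measure`.
 : // The bindings `timer`, `flush_fut`, and `raw_fd` are hypothetical stand-ins for the real
 : // page_service call site; the caller must keep `raw_fd` open for the duration of the call.
 : //
 : //     if let Some(flush) = timer.observe_execution_end(Instant::now()) {
 : //         let res = flush.measure(Instant::now(), flush_fut, raw_fd).await;
 : //     }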
1612 :
1613 : #[derive(
1614 : Debug,
1615 : Clone,
1616 : Copy,
1617 : IntoStaticStr,
1618 : strum_macros::EnumCount,
1619 0 : strum_macros::EnumIter,
1620 : strum_macros::FromRepr,
1621 : enum_map::Enum,
1622 : )]
1623 : #[strum(serialize_all = "snake_case")]
1624 : pub enum SmgrQueryType {
1625 : GetRelExists,
1626 : GetRelSize,
1627 : GetPageAtLsn,
1628 : GetDbSize,
1629 : GetSlruSegment,
1630 : #[cfg(feature = "testing")]
1631 : Test,
1632 : }
1633 :
1634 : pub(crate) struct SmgrQueryTimePerTimeline {
1635 : global_started: [IntCounter; SmgrQueryType::COUNT],
1636 : global_latency: [Histogram; SmgrQueryType::COUNT],
1637 : per_timeline_getpage_started: IntCounter,
1638 : per_timeline_getpage_latency: Histogram,
1639 : global_batch_size: Histogram,
1640 : per_timeline_batch_size: Histogram,
1641 : global_flush_in_progress_micros: IntCounter,
1642 : per_timeline_flush_in_progress_micros: IntCounter,
1643 : global_batch_wait_time: Histogram,
1644 : per_timeline_batch_wait_time: Histogram,
1645 : throttling: Arc<tenant_throttling::Pagestream>,
1646 : }
1647 :
1648 412 : static SMGR_QUERY_STARTED_GLOBAL: Lazy<IntCounterVec> = Lazy::new(|| {
1649 412 : register_int_counter_vec!(
1650 412 : // it's a counter, but, name is prepared to extend it to a histogram of queue depth
1651 412 : "pageserver_smgr_query_started_global_count",
1652 412 : "Number of smgr queries started, aggregated by query type.",
1653 412 : &["smgr_query_type"],
1654 412 : )
1655 412 : .expect("failed to define a metric")
1656 412 : });
1657 :
1658 412 : static SMGR_QUERY_STARTED_PER_TENANT_TIMELINE: Lazy<IntCounterVec> = Lazy::new(|| {
1659 412 : register_int_counter_vec!(
1660 412 : // it's a counter, but the name is chosen so it can later be extended to a histogram of queue depth
1661 412 : "pageserver_smgr_query_started_count",
1662 412 : "Number of smgr queries started, aggregated by query type and tenant/timeline.",
1663 412 : &["smgr_query_type", "tenant_id", "shard_id", "timeline_id"],
1664 412 : )
1665 412 : .expect("failed to define a metric")
1666 412 : });
1667 :
1668 : // Alias so all histograms recording per-timeline smgr timings use the same buckets.
1669 : static SMGR_QUERY_TIME_PER_TENANT_TIMELINE_BUCKETS: &[f64] = CRITICAL_OP_BUCKETS;
1670 :
1671 412 : static SMGR_QUERY_TIME_PER_TENANT_TIMELINE: Lazy<HistogramVec> = Lazy::new(|| {
1672 412 : register_histogram_vec!(
1673 412 : "pageserver_smgr_query_seconds",
1674 412 : "Time spent _executing_ smgr query handling, excluding batch and throttle delays.",
1675 412 : &["smgr_query_type", "tenant_id", "shard_id", "timeline_id"],
1676 412 : SMGR_QUERY_TIME_PER_TENANT_TIMELINE_BUCKETS.into(),
1677 412 : )
1678 412 : .expect("failed to define a metric")
1679 412 : });
1680 :
1681 412 : static SMGR_QUERY_TIME_GLOBAL_BUCKETS: Lazy<Vec<f64>> = Lazy::new(|| {
1682 412 : [
1683 412 : 1,
1684 412 : 10,
1685 412 : 20,
1686 412 : 40,
1687 412 : 60,
1688 412 : 80,
1689 412 : 100,
1690 412 : 200,
1691 412 : 300,
1692 412 : 400,
1693 412 : 500,
1694 412 : 600,
1695 412 : 700,
1696 412 : 800,
1697 412 : 900,
1698 412 : 1_000, // 1ms
1699 412 : 2_000,
1700 412 : 4_000,
1701 412 : 6_000,
1702 412 : 8_000,
1703 412 : 10_000, // 10ms
1704 412 : 20_000,
1705 412 : 40_000,
1706 412 : 60_000,
1707 412 : 80_000,
1708 412 : 100_000,
1709 412 : 200_000,
1710 412 : 400_000,
1711 412 : 600_000,
1712 412 : 800_000,
1713 412 : 1_000_000, // 1s
1714 412 : 2_000_000,
1715 412 : 4_000_000,
1716 412 : 6_000_000,
1717 412 : 8_000_000,
1718 412 : 10_000_000, // 10s
1719 412 : 20_000_000,
1720 412 : 50_000_000,
1721 412 : 100_000_000,
1722 412 : 200_000_000,
1723 412 : 1_000_000_000, // 1000s
1724 412 : ]
1725 412 : .into_iter()
1726 412 : .map(Duration::from_micros)
1727 16892 : .map(|d| d.as_secs_f64())
1728 412 : .collect()
1729 412 : });
1730 :
1731 412 : static SMGR_QUERY_TIME_GLOBAL: Lazy<HistogramVec> = Lazy::new(|| {
1732 412 : register_histogram_vec!(
1733 412 : "pageserver_smgr_query_seconds_global",
1734 412 : "Like pageserver_smgr_query_seconds, but aggregated to instance level.",
1735 412 : &["smgr_query_type"],
1736 412 : SMGR_QUERY_TIME_GLOBAL_BUCKETS.clone(),
1737 412 : )
1738 412 : .expect("failed to define a metric")
1739 412 : });
1740 :
1741 412 : static PAGE_SERVICE_BATCH_SIZE_BUCKETS_GLOBAL: Lazy<Vec<f64>> = Lazy::new(|| {
1742 412 : (1..=u32::try_from(Timeline::MAX_GET_VECTORED_KEYS).unwrap())
1743 13184 : .map(|v| v.into())
1744 412 : .collect()
1745 412 : });
1746 :
1747 412 : static PAGE_SERVICE_BATCH_SIZE_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
1748 412 : register_histogram!(
1749 412 : "pageserver_page_service_batch_size_global",
1750 412 : "Batch size of pageserver page service requests",
1751 412 : PAGE_SERVICE_BATCH_SIZE_BUCKETS_GLOBAL.clone(),
1752 412 : )
1753 412 : .expect("failed to define a metric")
1754 412 : });
1755 :
1756 412 : static PAGE_SERVICE_BATCH_SIZE_BUCKETS_PER_TIMELINE: Lazy<Vec<f64>> = Lazy::new(|| {
1757 412 : let mut buckets = Vec::new();
1758 2884 : for i in 0.. {
1759 2884 : let bucket = 1 << i;
1760 2884 : if bucket > u32::try_from(Timeline::MAX_GET_VECTORED_KEYS).unwrap() {
1761 412 : break;
1762 2472 : }
1763 2472 : buckets.push(bucket.into());
1764 : }
1765 412 : buckets
1766 412 : });
1767 :
1768 412 : static PAGE_SERVICE_BATCH_SIZE_PER_TENANT_TIMELINE: Lazy<HistogramVec> = Lazy::new(|| {
1769 412 : register_histogram_vec!(
1770 412 : "pageserver_page_service_batch_size",
1771 412 : "Batch size of pageserver page service requests",
1772 412 : &["tenant_id", "shard_id", "timeline_id"],
1773 412 : PAGE_SERVICE_BATCH_SIZE_BUCKETS_PER_TIMELINE.clone()
1774 412 : )
1775 412 : .expect("failed to define a metric")
1776 412 : });
1777 :
1778 0 : pub(crate) static PAGE_SERVICE_CONFIG_MAX_BATCH_SIZE: Lazy<IntGaugeVec> = Lazy::new(|| {
1779 0 : register_int_gauge_vec!(
1780 0 : "pageserver_page_service_config_max_batch_size",
1781 0 : "Configured maximum batch size for the server-side batching functionality of page_service. \
1782 0 : Labels expose more of the configuration parameters.",
1783 0 : &["mode", "execution"]
1784 0 : )
1785 0 : .expect("failed to define a metric")
1786 0 : });
1787 :
1788 0 : fn set_page_service_config_max_batch_size(conf: &PageServicePipeliningConfig) {
1789 0 : PAGE_SERVICE_CONFIG_MAX_BATCH_SIZE.reset();
1790 0 : let (label_values, value) = match conf {
1791 0 : PageServicePipeliningConfig::Serial => (["serial", "-"], 1),
1792 : PageServicePipeliningConfig::Pipelined(PageServicePipeliningConfigPipelined {
1793 0 : max_batch_size,
1794 0 : execution,
1795 0 : }) => {
1796 0 : let mode = "pipelined";
1797 0 : let execution = match execution {
1798 : PageServiceProtocolPipelinedExecutionStrategy::ConcurrentFutures => {
1799 0 : "concurrent-futures"
1800 : }
1801 0 : PageServiceProtocolPipelinedExecutionStrategy::Tasks => "tasks",
1802 : };
1803 0 : ([mode, execution], max_batch_size.get())
1804 : }
1805 : };
1806 0 : PAGE_SERVICE_CONFIG_MAX_BATCH_SIZE
1807 0 : .with_label_values(&label_values)
1808 0 : .set(value.try_into().unwrap());
1809 0 : }
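 :
 : // Editorial sketch (not part of the original source): for a pipelined config with
 : // max_batch_size = 32 and the concurrent-futures strategy, the function above sets the
 : // gauge labelled {mode="pipelined", execution="concurrent-futures"} to 32; for
 : // `PageServicePipeliningConfig::Serial` it sets {mode="serial", execution="-"} to 1.
 : // A hypothetical call site (the `page_service_pipelining` field name is assumed):
 : //
 : //     set_page_service_config_max_batch_size(&conf.page_service_pipelining);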
1810 :
1811 412 : static PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS: Lazy<IntCounterVec> = Lazy::new(|| {
1812 412 : register_int_counter_vec!(
1813 412 : "pageserver_page_service_pagestream_flush_in_progress_micros",
1814 412 : "Counter that sums up the microseconds that a pagestream response was being flushed into the TCP connection. \
1815 412 : If the flush is particularly slow, this counter will be updated periodically to make slow flushes \
1816 412 : easily discoverable in monitoring. \
1817 412 : Hence, this is NOT a completion latency histogram.",
1818 412 : &["tenant_id", "shard_id", "timeline_id"],
1819 412 : )
1820 412 : .expect("failed to define a metric")
1821 412 : });
1822 :
1823 412 : static PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS_GLOBAL: Lazy<IntCounter> = Lazy::new(|| {
1824 412 : register_int_counter!(
1825 412 : "pageserver_page_service_pagestream_flush_in_progress_micros_global",
1826 412 : "Like pageserver_page_service_pagestream_flush_in_progress_seconds, but instance-wide.",
1827 412 : )
1828 412 : .expect("failed to define a metric")
1829 412 : });
1830 :
1831 412 : static PAGE_SERVICE_SMGR_BATCH_WAIT_TIME: Lazy<HistogramVec> = Lazy::new(|| {
1832 412 : register_histogram_vec!(
1833 412 : "pageserver_page_service_pagestream_batch_wait_time_seconds",
1834 412 : "Time a request spent waiting in its batch until the batch moved to throttle&execution.",
1835 412 : &["tenant_id", "shard_id", "timeline_id"],
1836 412 : SMGR_QUERY_TIME_PER_TENANT_TIMELINE_BUCKETS.into(),
1837 412 : )
1838 412 : .expect("failed to define a metric")
1839 412 : });
1840 :
1841 412 : static PAGE_SERVICE_SMGR_BATCH_WAIT_TIME_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
1842 412 : register_histogram!(
1843 412 : "pageserver_page_service_pagestream_batch_wait_time_seconds_global",
1844 412 : "Like pageserver_page_service_pagestream_batch_wait_time_seconds, but aggregated to instance level.",
1845 412 : SMGR_QUERY_TIME_GLOBAL_BUCKETS.to_vec(),
1846 412 : )
1847 412 : .expect("failed to define a metric")
1848 412 : });
1849 :
1850 : impl SmgrQueryTimePerTimeline {
1851 904 : pub(crate) fn new(
1852 904 : tenant_shard_id: &TenantShardId,
1853 904 : timeline_id: &TimelineId,
1854 904 : pagestream_throttle_metrics: Arc<tenant_throttling::Pagestream>,
1855 904 : ) -> Self {
1856 904 : let tenant_id = tenant_shard_id.tenant_id.to_string();
1857 904 : let shard_slug = format!("{}", tenant_shard_id.shard_slug());
1858 904 : let timeline_id = timeline_id.to_string();
1859 5424 : let global_started = std::array::from_fn(|i| {
1860 5424 : let op = SmgrQueryType::from_repr(i).unwrap();
1861 5424 : SMGR_QUERY_STARTED_GLOBAL
1862 5424 : .get_metric_with_label_values(&[op.into()])
1863 5424 : .unwrap()
1864 5424 : });
1865 5424 : let global_latency = std::array::from_fn(|i| {
1866 5424 : let op = SmgrQueryType::from_repr(i).unwrap();
1867 5424 : SMGR_QUERY_TIME_GLOBAL
1868 5424 : .get_metric_with_label_values(&[op.into()])
1869 5424 : .unwrap()
1870 5424 : });
1871 904 :
1872 904 : let per_timeline_getpage_started = SMGR_QUERY_STARTED_PER_TENANT_TIMELINE
1873 904 : .get_metric_with_label_values(&[
1874 904 : SmgrQueryType::GetPageAtLsn.into(),
1875 904 : &tenant_id,
1876 904 : &shard_slug,
1877 904 : &timeline_id,
1878 904 : ])
1879 904 : .unwrap();
1880 904 : let per_timeline_getpage_latency = SMGR_QUERY_TIME_PER_TENANT_TIMELINE
1881 904 : .get_metric_with_label_values(&[
1882 904 : SmgrQueryType::GetPageAtLsn.into(),
1883 904 : &tenant_id,
1884 904 : &shard_slug,
1885 904 : &timeline_id,
1886 904 : ])
1887 904 : .unwrap();
1888 904 :
1889 904 : let global_batch_size = PAGE_SERVICE_BATCH_SIZE_GLOBAL.clone();
1890 904 : let per_timeline_batch_size = PAGE_SERVICE_BATCH_SIZE_PER_TENANT_TIMELINE
1891 904 : .get_metric_with_label_values(&[&tenant_id, &shard_slug, &timeline_id])
1892 904 : .unwrap();
1893 904 :
1894 904 : let global_batch_wait_time = PAGE_SERVICE_SMGR_BATCH_WAIT_TIME_GLOBAL.clone();
1895 904 : let per_timeline_batch_wait_time = PAGE_SERVICE_SMGR_BATCH_WAIT_TIME
1896 904 : .get_metric_with_label_values(&[&tenant_id, &shard_slug, &timeline_id])
1897 904 : .unwrap();
1898 904 :
1899 904 : let global_flush_in_progress_micros =
1900 904 : PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS_GLOBAL.clone();
1901 904 : let per_timeline_flush_in_progress_micros = PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS
1902 904 : .get_metric_with_label_values(&[&tenant_id, &shard_slug, &timeline_id])
1903 904 : .unwrap();
1904 904 :
1905 904 : Self {
1906 904 : global_started,
1907 904 : global_latency,
1908 904 : per_timeline_getpage_latency,
1909 904 : per_timeline_getpage_started,
1910 904 : global_batch_size,
1911 904 : per_timeline_batch_size,
1912 904 : global_flush_in_progress_micros,
1913 904 : per_timeline_flush_in_progress_micros,
1914 904 : global_batch_wait_time,
1915 904 : per_timeline_batch_wait_time,
1916 904 : throttling: pagestream_throttle_metrics,
1917 904 : }
1918 904 : }
1919 0 : pub(crate) fn start_smgr_op(&self, op: SmgrQueryType, received_at: Instant) -> SmgrOpTimer {
1920 0 : self.global_started[op as usize].inc();
1921 :
1922 0 : let per_timeline_latency_histo = if matches!(op, SmgrQueryType::GetPageAtLsn) {
1923 0 : self.per_timeline_getpage_started.inc();
1924 0 : Some(self.per_timeline_getpage_latency.clone())
1925 : } else {
1926 0 : None
1927 : };
1928 :
1929 0 : SmgrOpTimer(Some(SmgrOpTimerInner {
1930 0 : global_execution_latency_histo: self.global_latency[op as usize].clone(),
1931 0 : per_timeline_execution_latency_histo: per_timeline_latency_histo,
1932 0 : global_flush_in_progress_micros: self.global_flush_in_progress_micros.clone(),
1933 0 : per_timeline_flush_in_progress_micros: self
1934 0 : .per_timeline_flush_in_progress_micros
1935 0 : .clone(),
1936 0 : global_batch_wait_time: self.global_batch_wait_time.clone(),
1937 0 : per_timeline_batch_wait_time: self.per_timeline_batch_wait_time.clone(),
1938 0 : throttling: self.throttling.clone(),
1939 0 : timings: SmgrOpTimerState::Received { received_at },
1940 0 : }))
1941 0 : }
1942 :
1943 : /// TODO: do something about this? It seems odd; we have a similar call on SmgrOpTimer.
1944 0 : pub(crate) fn observe_getpage_batch_start(&self, batch_size: usize) {
1945 0 : self.global_batch_size.observe(batch_size as f64);
1946 0 : self.per_timeline_batch_size.observe(batch_size as f64);
1947 0 : }
1948 : }
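 :
 : // Editorial sketch (not part of the original source): a request handler would typically
 : // create one timer per smgr request and then drive it through the SmgrOpTimer states
 : // defined earlier (hypothetical `metrics` binding of type SmgrQueryTimePerTimeline):
 : //
 : //     let mut timer = metrics.start_smgr_op(SmgrQueryType::GetPageAtLsn, Instant::now());
 : //     // ... batching, throttling, execution ...
 : //
 : // Note that per-timeline latency is only recorded for GetPageAtLsn; all other query
 : // types only update the global histograms.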
1949 :
1950 : // keep in sync with control plane Go code so that we can validate
1951 : // compute's basebackup_ms metric with our perspective in the context of SLI/SLO.
1952 0 : static COMPUTE_STARTUP_BUCKETS: Lazy<[f64; 28]> = Lazy::new(|| {
1953 0 : // Go code uses milliseconds. Variable is called `computeStartupBuckets`
1954 0 : [
1955 0 : 5, 10, 20, 30, 50, 70, 100, 120, 150, 200, 250, 300, 350, 400, 450, 500, 600, 800, 1000,
1956 0 : 1500, 2000, 2500, 3000, 5000, 10000, 20000, 40000, 60000,
1957 0 : ]
1958 0 : .map(|ms| (ms as f64) / 1000.0)
1959 0 : });
1960 :
1961 : pub(crate) struct BasebackupQueryTime {
1962 : ok: Histogram,
1963 : error: Histogram,
1964 : client_error: Histogram,
1965 : }
1966 :
1967 0 : pub(crate) static BASEBACKUP_QUERY_TIME: Lazy<BasebackupQueryTime> = Lazy::new(|| {
1968 0 : let vec = register_histogram_vec!(
1969 0 : "pageserver_basebackup_query_seconds",
1970 0 : "Histogram of basebackup queries durations, by result type",
1971 0 : &["result"],
1972 0 : COMPUTE_STARTUP_BUCKETS.to_vec(),
1973 0 : )
1974 0 : .expect("failed to define a metric");
1975 0 : BasebackupQueryTime {
1976 0 : ok: vec.get_metric_with_label_values(&["ok"]).unwrap(),
1977 0 : error: vec.get_metric_with_label_values(&["error"]).unwrap(),
1978 0 : client_error: vec.get_metric_with_label_values(&["client_error"]).unwrap(),
1979 0 : }
1980 0 : });
1981 :
1982 : pub(crate) struct BasebackupQueryTimeOngoingRecording<'a> {
1983 : parent: &'a BasebackupQueryTime,
1984 : start: std::time::Instant,
1985 : }
1986 :
1987 : impl BasebackupQueryTime {
1988 0 : pub(crate) fn start_recording(&self) -> BasebackupQueryTimeOngoingRecording<'_> {
1989 0 : let start = Instant::now();
1990 0 : BasebackupQueryTimeOngoingRecording {
1991 0 : parent: self,
1992 0 : start,
1993 0 : }
1994 0 : }
1995 : }
1996 :
1997 : impl BasebackupQueryTimeOngoingRecording<'_> {
1998 0 : pub(crate) fn observe<T>(self, res: &Result<T, QueryError>) {
1999 0 : let elapsed = self.start.elapsed().as_secs_f64();
2000 : // If you want to change the categorization of a specific error, also change it in `log_query_error`.
2001 0 : let metric = match res {
2002 0 : Ok(_) => &self.parent.ok,
2003 0 : Err(QueryError::Disconnected(ConnectionError::Io(io_error)))
2004 0 : if is_expected_io_error(io_error) =>
2005 0 : {
2006 0 : &self.parent.client_error
2007 : }
2008 0 : Err(_) => &self.parent.error,
2009 : };
2010 0 : metric.observe(elapsed);
2011 0 : }
2012 : }
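 :
 : // Editorial sketch (not part of the original source): intended use of the recorder. The
 : // `run_basebackup()` future is hypothetical; it stands in for the real handler and must
 : // return a `Result<_, QueryError>`.
 : //
 : //     let recording = BASEBACKUP_QUERY_TIME.start_recording();
 : //     let res = run_basebackup().await;
 : //     recording.observe(&res);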
2013 :
2014 0 : pub(crate) static LIVE_CONNECTIONS: Lazy<IntCounterPairVec> = Lazy::new(|| {
2015 0 : register_int_counter_pair_vec!(
2016 0 : "pageserver_live_connections_started",
2017 0 : "Number of network connections that we started handling",
2018 0 : "pageserver_live_connections_finished",
2019 0 : "Number of network connections that we finished handling",
2020 0 : &["pageserver_connection_kind"]
2021 0 : )
2022 0 : .expect("failed to define a metric")
2023 0 : });
2024 :
2025 : #[derive(Clone, Copy, enum_map::Enum, IntoStaticStr)]
2026 : pub(crate) enum ComputeCommandKind {
2027 : PageStreamV3,
2028 : PageStreamV2,
2029 : Basebackup,
2030 : Fullbackup,
2031 : LeaseLsn,
2032 : }
2033 :
2034 : pub(crate) struct ComputeCommandCounters {
2035 : map: EnumMap<ComputeCommandKind, IntCounter>,
2036 : }
2037 :
2038 0 : pub(crate) static COMPUTE_COMMANDS_COUNTERS: Lazy<ComputeCommandCounters> = Lazy::new(|| {
2039 0 : let inner = register_int_counter_vec!(
2040 0 : "pageserver_compute_commands",
2041 0 : "Number of compute -> pageserver commands processed",
2042 0 : &["command"]
2043 0 : )
2044 0 : .expect("failed to define a metric");
2045 0 :
2046 0 : ComputeCommandCounters {
2047 0 : map: EnumMap::from_array(std::array::from_fn(|i| {
2048 0 : let command = ComputeCommandKind::from_usize(i);
2049 0 : let command_str: &'static str = command.into();
2050 0 : inner.with_label_values(&[command_str])
2051 0 : })),
2052 0 : }
2053 0 : });
2054 :
2055 : impl ComputeCommandCounters {
2056 0 : pub(crate) fn for_command(&self, command: ComputeCommandKind) -> &IntCounter {
2057 0 : &self.map[command]
2058 0 : }
2059 : }
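 :
 : // Editorial sketch (not part of the original source): counting a handled command is a
 : // single label lookup plus increment, e.g.
 : //
 : //     COMPUTE_COMMANDS_COUNTERS
 : //         .for_command(ComputeCommandKind::Basebackup)
 : //         .inc();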
2060 :
2061 : // remote storage metrics
2062 :
2063 404 : static REMOTE_TIMELINE_CLIENT_CALLS: Lazy<IntCounterPairVec> = Lazy::new(|| {
2064 404 : register_int_counter_pair_vec!(
2065 404 : "pageserver_remote_timeline_client_calls_started",
2066 404 : "Number of started calls to remote timeline client.",
2067 404 : "pageserver_remote_timeline_client_calls_finished",
2068 404 : "Number of finshed calls to remote timeline client.",
2069 404 : &[
2070 404 : "tenant_id",
2071 404 : "shard_id",
2072 404 : "timeline_id",
2073 404 : "file_kind",
2074 404 : "op_kind"
2075 404 : ],
2076 404 : )
2077 404 : .unwrap()
2078 404 : });
2079 :
2080 : static REMOTE_TIMELINE_CLIENT_BYTES_STARTED_COUNTER: Lazy<IntCounterVec> =
2081 400 : Lazy::new(|| {
2082 400 : register_int_counter_vec!(
2083 400 : "pageserver_remote_timeline_client_bytes_started",
2084 400 : "Incremented by the number of bytes associated with a remote timeline client operation. \
2085 400 : The increment happens when the operation is scheduled.",
2086 400 : &["tenant_id", "shard_id", "timeline_id", "file_kind", "op_kind"],
2087 400 : )
2088 400 : .expect("failed to define a metric")
2089 400 : });
2090 :
2091 400 : static REMOTE_TIMELINE_CLIENT_BYTES_FINISHED_COUNTER: Lazy<IntCounterVec> = Lazy::new(|| {
2092 400 : register_int_counter_vec!(
2093 400 : "pageserver_remote_timeline_client_bytes_finished",
2094 400 : "Incremented by the number of bytes associated with a remote timeline client operation. \
2095 400 : The increment happens when the operation finishes (regardless of success/failure/shutdown).",
2096 400 : &["tenant_id", "shard_id", "timeline_id", "file_kind", "op_kind"],
2097 400 : )
2098 400 : .expect("failed to define a metric")
2099 400 : });
2100 :
2101 : pub(crate) struct TenantManagerMetrics {
2102 : tenant_slots_attached: UIntGauge,
2103 : tenant_slots_secondary: UIntGauge,
2104 : tenant_slots_inprogress: UIntGauge,
2105 : pub(crate) tenant_slot_writes: IntCounter,
2106 : pub(crate) unexpected_errors: IntCounter,
2107 : }
2108 :
2109 : impl TenantManagerMetrics {
2110 : /// Helpers for tracking slots. Note that these do not track the lifetime of TenantSlot objects
2111 : /// exactly: they track the lifetime of the slots _in the tenant map_.
2112 4 : pub(crate) fn slot_inserted(&self, slot: &TenantSlot) {
2113 4 : match slot {
2114 0 : TenantSlot::Attached(_) => {
2115 0 : self.tenant_slots_attached.inc();
2116 0 : }
2117 0 : TenantSlot::Secondary(_) => {
2118 0 : self.tenant_slots_secondary.inc();
2119 0 : }
2120 4 : TenantSlot::InProgress(_) => {
2121 4 : self.tenant_slots_inprogress.inc();
2122 4 : }
2123 : }
2124 4 : }
2125 :
2126 4 : pub(crate) fn slot_removed(&self, slot: &TenantSlot) {
2127 4 : match slot {
2128 4 : TenantSlot::Attached(_) => {
2129 4 : self.tenant_slots_attached.dec();
2130 4 : }
2131 0 : TenantSlot::Secondary(_) => {
2132 0 : self.tenant_slots_secondary.dec();
2133 0 : }
2134 0 : TenantSlot::InProgress(_) => {
2135 0 : self.tenant_slots_inprogress.dec();
2136 0 : }
2137 : }
2138 4 : }
2139 :
2140 : #[cfg(all(debug_assertions, not(test)))]
2141 0 : pub(crate) fn slots_total(&self) -> u64 {
2142 0 : self.tenant_slots_attached.get()
2143 0 : + self.tenant_slots_secondary.get()
2144 0 : + self.tenant_slots_inprogress.get()
2145 0 : }
2146 : }
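 :
 : // Editorial sketch (not part of the original source): the tenant map is expected to call
 : // `slot_inserted`/`slot_removed` symmetrically so the three gauges track the live slot
 : // population, e.g. when replacing an InProgress slot with an Attached one (hypothetical
 : // `old_slot` and `new_slot` bindings):
 : //
 : //     TENANT_MANAGER.slot_removed(&old_slot);
 : //     TENANT_MANAGER.slot_inserted(&new_slot);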
2147 :
2148 4 : pub(crate) static TENANT_MANAGER: Lazy<TenantManagerMetrics> = Lazy::new(|| {
2149 4 : let tenant_slots = register_uint_gauge_vec!(
2150 4 : "pageserver_tenant_manager_slots",
2151 4 : "How many slots currently exist, including all attached, secondary and in-progress operations",
2152 4 : &["mode"]
2153 4 : )
2154 4 : .expect("failed to define a metric");
2155 4 : TenantManagerMetrics {
2156 4 : tenant_slots_attached: tenant_slots
2157 4 : .get_metric_with_label_values(&["attached"])
2158 4 : .unwrap(),
2159 4 : tenant_slots_secondary: tenant_slots
2160 4 : .get_metric_with_label_values(&["secondary"])
2161 4 : .unwrap(),
2162 4 : tenant_slots_inprogress: tenant_slots
2163 4 : .get_metric_with_label_values(&["inprogress"])
2164 4 : .unwrap(),
2165 4 : tenant_slot_writes: register_int_counter!(
2166 4 : "pageserver_tenant_manager_slot_writes",
2167 4 : "Writes to a tenant slot, including all of create/attach/detach/delete"
2168 4 : )
2169 4 : .expect("failed to define a metric"),
2170 4 : unexpected_errors: register_int_counter!(
2171 4 : "pageserver_tenant_manager_unexpected_errors_total",
2172 4 : "Number of unexpected conditions encountered: nonzero value indicates a non-fatal bug."
2173 4 : )
2174 4 : .expect("failed to define a metric"),
2175 4 : }
2176 4 : });
2177 :
2178 : pub(crate) struct DeletionQueueMetrics {
2179 : pub(crate) keys_submitted: IntCounter,
2180 : pub(crate) keys_dropped: IntCounter,
2181 : pub(crate) keys_executed: IntCounter,
2182 : pub(crate) keys_validated: IntCounter,
2183 : pub(crate) dropped_lsn_updates: IntCounter,
2184 : pub(crate) unexpected_errors: IntCounter,
2185 : pub(crate) remote_errors: IntCounterVec,
2186 : }
2187 67 : pub(crate) static DELETION_QUEUE: Lazy<DeletionQueueMetrics> = Lazy::new(|| {
2188 67 : DeletionQueueMetrics{
2189 67 :
2190 67 : keys_submitted: register_int_counter!(
2191 67 : "pageserver_deletion_queue_submitted_total",
2192 67 : "Number of objects submitted for deletion"
2193 67 : )
2194 67 : .expect("failed to define a metric"),
2195 67 :
2196 67 : keys_dropped: register_int_counter!(
2197 67 : "pageserver_deletion_queue_dropped_total",
2198 67 : "Number of object deletions dropped due to stale generation."
2199 67 : )
2200 67 : .expect("failed to define a metric"),
2201 67 :
2202 67 : keys_executed: register_int_counter!(
2203 67 : "pageserver_deletion_queue_executed_total",
2204 67 : "Number of objects deleted. Only includes objects that we actually deleted, sum with pageserver_deletion_queue_dropped_total for the total number of keys processed to completion"
2205 67 : )
2206 67 : .expect("failed to define a metric"),
2207 67 :
2208 67 : keys_validated: register_int_counter!(
2209 67 : "pageserver_deletion_queue_validated_total",
2210 67 : "Number of keys validated for deletion. Sum with pageserver_deletion_queue_dropped_total for the total number of keys that have passed through the validation stage."
2211 67 : )
2212 67 : .expect("failed to define a metric"),
2213 67 :
2214 67 : dropped_lsn_updates: register_int_counter!(
2215 67 : "pageserver_deletion_queue_dropped_lsn_updates_total",
2216 67 : "Updates to remote_consistent_lsn dropped due to stale generation number."
2217 67 : )
2218 67 : .expect("failed to define a metric"),
2219 67 : unexpected_errors: register_int_counter!(
2220 67 : "pageserver_deletion_queue_unexpected_errors_total",
2221 67 : "Number of unexpected condiions that may stall the queue: any value above zero is unexpected."
2222 67 : )
2223 67 : .expect("failed to define a metric"),
2224 67 : remote_errors: register_int_counter_vec!(
2225 67 : "pageserver_deletion_queue_remote_errors_total",
2226 67 : "Retryable remote I/O errors while executing deletions, for example 503 responses to DeleteObjects",
2227 67 : &["op_kind"],
2228 67 : )
2229 67 : .expect("failed to define a metric")
2230 67 : }
2231 67 : });
2232 :
2233 : pub(crate) struct SecondaryModeMetrics {
2234 : pub(crate) upload_heatmap: IntCounter,
2235 : pub(crate) upload_heatmap_errors: IntCounter,
2236 : pub(crate) upload_heatmap_duration: Histogram,
2237 : pub(crate) download_heatmap: IntCounter,
2238 : pub(crate) download_layer: IntCounter,
2239 : }
2240 0 : pub(crate) static SECONDARY_MODE: Lazy<SecondaryModeMetrics> = Lazy::new(|| {
2241 0 : SecondaryModeMetrics {
2242 0 : upload_heatmap: register_int_counter!(
2243 0 : "pageserver_secondary_upload_heatmap",
2244 0 : "Number of heatmaps written to remote storage by attached tenants"
2245 0 : )
2246 0 : .expect("failed to define a metric"),
2247 0 : upload_heatmap_errors: register_int_counter!(
2248 0 : "pageserver_secondary_upload_heatmap_errors",
2249 0 : "Failures writing heatmap to remote storage"
2250 0 : )
2251 0 : .expect("failed to define a metric"),
2252 0 : upload_heatmap_duration: register_histogram!(
2253 0 : "pageserver_secondary_upload_heatmap_duration",
2254 0 : "Time to build and upload a heatmap, including any waiting inside the remote storage client"
2255 0 : )
2256 0 : .expect("failed to define a metric"),
2257 0 : download_heatmap: register_int_counter!(
2258 0 : "pageserver_secondary_download_heatmap",
2259 0 : "Number of downloads of heatmaps by secondary mode locations, including when it hasn't changed"
2260 0 : )
2261 0 : .expect("failed to define a metric"),
2262 0 : download_layer: register_int_counter!(
2263 0 : "pageserver_secondary_download_layer",
2264 0 : "Number of downloads of layers by secondary mode locations"
2265 0 : )
2266 0 : .expect("failed to define a metric"),
2267 0 : }
2268 0 : });
2269 :
2270 0 : pub(crate) static SECONDARY_RESIDENT_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
2271 0 : register_uint_gauge_vec!(
2272 0 : "pageserver_secondary_resident_physical_size",
2273 0 : "The size of the layer files present in the pageserver's filesystem, for secondary locations.",
2274 0 : &["tenant_id", "shard_id"]
2275 0 : )
2276 0 : .expect("failed to define a metric")
2277 0 : });
2278 :
2279 0 : pub(crate) static NODE_UTILIZATION_SCORE: Lazy<UIntGauge> = Lazy::new(|| {
2280 0 : register_uint_gauge!(
2281 0 : "pageserver_utilization_score",
2282 0 : "The utilization score we report to the storage controller for scheduling, where 0 is empty, 1000000 is full, and anything above is considered overloaded",
2283 0 : )
2284 0 : .expect("failed to define a metric")
2285 0 : });
2286 :
2287 0 : pub(crate) static SECONDARY_HEATMAP_TOTAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
2288 0 : register_uint_gauge_vec!(
2289 0 : "pageserver_secondary_heatmap_total_size",
2290 0 : "The total size in bytes of all layers in the most recently downloaded heatmap.",
2291 0 : &["tenant_id", "shard_id"]
2292 0 : )
2293 0 : .expect("failed to define a metric")
2294 0 : });
2295 :
2296 : #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
2297 : pub enum RemoteOpKind {
2298 : Upload,
2299 : Download,
2300 : Delete,
2301 : }
2302 : impl RemoteOpKind {
2303 30585 : pub fn as_str(&self) -> &'static str {
2304 30585 : match self {
2305 28767 : Self::Upload => "upload",
2306 136 : Self::Download => "download",
2307 1682 : Self::Delete => "delete",
2308 : }
2309 30585 : }
2310 : }
2311 :
2312 : #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
2313 : pub enum RemoteOpFileKind {
2314 : Layer,
2315 : Index,
2316 : }
2317 : impl RemoteOpFileKind {
2318 30585 : pub fn as_str(&self) -> &'static str {
2319 30585 : match self {
2320 21511 : Self::Layer => "layer",
2321 9074 : Self::Index => "index",
2322 : }
2323 30585 : }
2324 : }
2325 :
2326 397 : pub(crate) static REMOTE_OPERATION_TIME: Lazy<HistogramVec> = Lazy::new(|| {
2327 397 : register_histogram_vec!(
2328 397 : "pageserver_remote_operation_seconds",
2329 397 : "Time spent on remote storage operations. \
2330 397 : Grouped by file_kind, op_kind and status. \
2331 397 : Does not account for time spent waiting in remote timeline client's queues.",
2332 397 : &["file_kind", "op_kind", "status"]
2333 397 : )
2334 397 : .expect("failed to define a metric")
2335 397 : });
2336 :
2337 0 : pub(crate) static TENANT_TASK_EVENTS: Lazy<IntCounterVec> = Lazy::new(|| {
2338 0 : register_int_counter_vec!(
2339 0 : "pageserver_tenant_task_events",
2340 0 : "Number of task start/stop/fail events.",
2341 0 : &["event"],
2342 0 : )
2343 0 : .expect("Failed to register tenant_task_events metric")
2344 0 : });
2345 :
2346 : pub struct BackgroundLoopSemaphoreMetrics {
2347 : counters: EnumMap<BackgroundLoopKind, IntCounterPair>,
2348 : durations: EnumMap<BackgroundLoopKind, Histogram>,
2349 : waiting_tasks: EnumMap<BackgroundLoopKind, IntGauge>,
2350 : running_tasks: EnumMap<BackgroundLoopKind, IntGauge>,
2351 : }
2352 :
2353 : pub(crate) static BACKGROUND_LOOP_SEMAPHORE: Lazy<BackgroundLoopSemaphoreMetrics> =
2354 40 : Lazy::new(|| {
2355 40 : let counters = register_int_counter_pair_vec!(
2356 40 : "pageserver_background_loop_semaphore_wait_start_count",
2357 40 : "Counter for background loop concurrency-limiting semaphore acquire calls started",
2358 40 : "pageserver_background_loop_semaphore_wait_finish_count",
2359 40 : "Counter for background loop concurrency-limiting semaphore acquire calls finished",
2360 40 : &["task"],
2361 40 : )
2362 40 : .unwrap();
2363 40 :
2364 40 : let durations = register_histogram_vec!(
2365 40 : "pageserver_background_loop_semaphore_wait_seconds",
2366 40 : "Seconds spent waiting on background loop semaphore acquisition",
2367 40 : &["task"],
2368 40 : vec![0.01, 1.0, 5.0, 10.0, 30.0, 60.0, 180.0, 300.0, 600.0],
2369 40 : )
2370 40 : .unwrap();
2371 40 :
2372 40 : let waiting_tasks = register_int_gauge_vec!(
2373 40 : "pageserver_background_loop_semaphore_waiting_tasks",
2374 40 : "Number of background loop tasks waiting for semaphore",
2375 40 : &["task"],
2376 40 : )
2377 40 : .unwrap();
2378 40 :
2379 40 : let running_tasks = register_int_gauge_vec!(
2380 40 : "pageserver_background_loop_semaphore_running_tasks",
2381 40 : "Number of background loop tasks running concurrently",
2382 40 : &["task"],
2383 40 : )
2384 40 : .unwrap();
2385 40 :
2386 40 : BackgroundLoopSemaphoreMetrics {
2387 400 : counters: EnumMap::from_array(std::array::from_fn(|i| {
2388 400 : let kind = BackgroundLoopKind::from_usize(i);
2389 400 : counters.with_label_values(&[kind.into()])
2390 400 : })),
2391 400 : durations: EnumMap::from_array(std::array::from_fn(|i| {
2392 400 : let kind = BackgroundLoopKind::from_usize(i);
2393 400 : durations.with_label_values(&[kind.into()])
2394 400 : })),
2395 400 : waiting_tasks: EnumMap::from_array(std::array::from_fn(|i| {
2396 400 : let kind = BackgroundLoopKind::from_usize(i);
2397 400 : waiting_tasks.with_label_values(&[kind.into()])
2398 400 : })),
2399 400 : running_tasks: EnumMap::from_array(std::array::from_fn(|i| {
2400 400 : let kind = BackgroundLoopKind::from_usize(i);
2401 400 : running_tasks.with_label_values(&[kind.into()])
2402 400 : })),
2403 40 : }
2404 40 : });
2405 :
2406 : impl BackgroundLoopSemaphoreMetrics {
2407 : /// Starts recording semaphore metrics. Call `acquired()` on the returned recorder when the
2408 : /// semaphore is acquired, and drop it when the task completes or is cancelled.
2409 725 : pub(crate) fn record(
2410 725 : &self,
2411 725 : task: BackgroundLoopKind,
2412 725 : ) -> BackgroundLoopSemaphoreMetricsRecorder {
2413 725 : BackgroundLoopSemaphoreMetricsRecorder::start(self, task)
2414 725 : }
2415 : }
2416 :
2417 : /// Records metrics for a background task.
2418 : pub struct BackgroundLoopSemaphoreMetricsRecorder<'a> {
2419 : metrics: &'a BackgroundLoopSemaphoreMetrics,
2420 : task: BackgroundLoopKind,
2421 : start: Instant,
2422 : wait_counter_guard: Option<metrics::IntCounterPairGuard>,
2423 : }
2424 :
2425 : impl<'a> BackgroundLoopSemaphoreMetricsRecorder<'a> {
2426 : /// Starts recording semaphore metrics, by recording wait time and incrementing
2427 : /// `wait_start_count` and `waiting_tasks`.
2428 725 : fn start(metrics: &'a BackgroundLoopSemaphoreMetrics, task: BackgroundLoopKind) -> Self {
2429 725 : metrics.waiting_tasks[task].inc();
2430 725 : Self {
2431 725 : metrics,
2432 725 : task,
2433 725 : start: Instant::now(),
2434 725 : wait_counter_guard: Some(metrics.counters[task].guard()),
2435 725 : }
2436 725 : }
2437 :
2438 : /// Signals that the semaphore has been acquired, and updates relevant metrics.
2439 725 : pub fn acquired(&mut self) -> Duration {
2440 725 : let waited = self.start.elapsed();
2441 725 : self.wait_counter_guard.take().expect("already acquired");
2442 725 : self.metrics.durations[self.task].observe(waited.as_secs_f64());
2443 725 : self.metrics.waiting_tasks[self.task].dec();
2444 725 : self.metrics.running_tasks[self.task].inc();
2445 725 : waited
2446 725 : }
2447 : }
2448 :
2449 : impl Drop for BackgroundLoopSemaphoreMetricsRecorder<'_> {
2450 : /// The task either completed or was cancelled.
2451 725 : fn drop(&mut self) {
2452 725 : if self.wait_counter_guard.take().is_some() {
2453 0 : // Waiting.
2454 0 : self.metrics.durations[self.task].observe(self.start.elapsed().as_secs_f64());
2455 0 : self.metrics.waiting_tasks[self.task].dec();
2456 725 : } else {
2457 725 : // Running.
2458 725 : self.metrics.running_tasks[self.task].dec();
2459 725 : }
2460 725 : }
2461 : }
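 :
 : // Editorial sketch (not part of the original source): expected call pattern for the
 : // recorder. `task_kind` is some BackgroundLoopKind value and `semaphore` a hypothetical
 : // binding for the concurrency-limiting semaphore.
 : //
 : //     let mut recorder = BACKGROUND_LOOP_SEMAPHORE.record(task_kind);
 : //     let _permit = semaphore.acquire().await;
 : //     let _waited = recorder.acquired();
 : //     // ... run the background task; dropping `recorder` decrements running_tasks.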
2462 :
2463 0 : pub(crate) static BACKGROUND_LOOP_PERIOD_OVERRUN_COUNT: Lazy<IntCounterVec> = Lazy::new(|| {
2464 0 : register_int_counter_vec!(
2465 0 : "pageserver_background_loop_period_overrun_count",
2466 0 : "Incremented whenever warn_when_period_overrun() logs a warning.",
2467 0 : &["task", "period"],
2468 0 : )
2469 0 : .expect("failed to define a metric")
2470 0 : });
2471 :
2472 : // walreceiver metrics
2473 :
2474 0 : pub(crate) static WALRECEIVER_STARTED_CONNECTIONS: Lazy<IntCounter> = Lazy::new(|| {
2475 0 : register_int_counter!(
2476 0 : "pageserver_walreceiver_started_connections_total",
2477 0 : "Number of started walreceiver connections"
2478 0 : )
2479 0 : .expect("failed to define a metric")
2480 0 : });
2481 :
2482 0 : pub(crate) static WALRECEIVER_ACTIVE_MANAGERS: Lazy<IntGauge> = Lazy::new(|| {
2483 0 : register_int_gauge!(
2484 0 : "pageserver_walreceiver_active_managers",
2485 0 : "Number of active walreceiver managers"
2486 0 : )
2487 0 : .expect("failed to define a metric")
2488 0 : });
2489 :
2490 0 : pub(crate) static WALRECEIVER_SWITCHES: Lazy<IntCounterVec> = Lazy::new(|| {
2491 0 : register_int_counter_vec!(
2492 0 : "pageserver_walreceiver_switches_total",
2493 0 : "Number of walreceiver manager change_connection calls",
2494 0 : &["reason"]
2495 0 : )
2496 0 : .expect("failed to define a metric")
2497 0 : });
2498 :
2499 0 : pub(crate) static WALRECEIVER_BROKER_UPDATES: Lazy<IntCounter> = Lazy::new(|| {
2500 0 : register_int_counter!(
2501 0 : "pageserver_walreceiver_broker_updates_total",
2502 0 : "Number of received broker updates in walreceiver"
2503 0 : )
2504 0 : .expect("failed to define a metric")
2505 0 : });
2506 :
2507 4 : pub(crate) static WALRECEIVER_CANDIDATES_EVENTS: Lazy<IntCounterVec> = Lazy::new(|| {
2508 4 : register_int_counter_vec!(
2509 4 : "pageserver_walreceiver_candidates_events_total",
2510 4 : "Number of walreceiver candidate events",
2511 4 : &["event"]
2512 4 : )
2513 4 : .expect("failed to define a metric")
2514 4 : });
2515 :
2516 : pub(crate) static WALRECEIVER_CANDIDATES_ADDED: Lazy<IntCounter> =
2517 0 : Lazy::new(|| WALRECEIVER_CANDIDATES_EVENTS.with_label_values(&["add"]));
2518 :
2519 : pub(crate) static WALRECEIVER_CANDIDATES_REMOVED: Lazy<IntCounter> =
2520 4 : Lazy::new(|| WALRECEIVER_CANDIDATES_EVENTS.with_label_values(&["remove"]));
2521 :
2522 : // Metrics collected on WAL redo operations
2523 : //
2524 : // We collect the time spent in actual WAL redo ('redo'), and time waiting
2525 : // for access to the postgres process ('wait') since there is only one for
2526 : // each tenant.
2527 :
2528 : /// Time buckets are small because we want to be able to measure the
2529 : /// smallest redo processing times. These buckets allow us to measure down
2530 : /// to 5us, which equates to 200'000 pages/sec, which equates to 1.6GB/sec.
2531 : /// This is much better than the previous 5ms aka 200 pages/sec aka 1.6MB/sec.
2532 : ///
2533 : /// Values up to 1s are recorded because metrics show that we have redo
2534 : /// durations and lock times larger than 0.250s.
2535 : macro_rules! redo_histogram_time_buckets {
2536 : () => {
2537 : vec![
2538 : 0.000_005, 0.000_010, 0.000_025, 0.000_050, 0.000_100, 0.000_250, 0.000_500, 0.001_000,
2539 : 0.002_500, 0.005_000, 0.010_000, 0.025_000, 0.050_000, 0.100_000, 0.250_000, 0.500_000,
2540 : 1.000_000,
2541 : ]
2542 : };
2543 : }
2544 :
2545 : /// While we're at it, also measure the amount of records replayed in each
2546 : /// operation. We have a global 'total replayed' counter, but that's not
2547 : /// as useful as 'what is the skew for how many records we replay in one
2548 : /// operation'.
2549 : macro_rules! redo_histogram_count_buckets {
2550 : () => {
2551 : vec![0.0, 1.0, 2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0]
2552 : };
2553 : }
2554 :
2555 : macro_rules! redo_bytes_histogram_count_buckets {
2556 : () => {
2557 : // powers of (2^.5), from 2^4.5 to 2^15 (22 buckets)
2558 : // rounded up to the next multiple of 8 to capture any MAXALIGNed record of that size, too.
2559 : vec![
2560 : 24.0, 32.0, 48.0, 64.0, 96.0, 128.0, 184.0, 256.0, 368.0, 512.0, 728.0, 1024.0, 1456.0,
2561 : 2048.0, 2904.0, 4096.0, 5800.0, 8192.0, 11592.0, 16384.0, 23176.0, 32768.0,
2562 : ]
2563 : };
2564 : }
2565 :
2566 : pub(crate) struct WalIngestMetrics {
2567 : pub(crate) bytes_received: IntCounter,
2568 : pub(crate) records_received: IntCounter,
2569 : pub(crate) records_observed: IntCounter,
2570 : pub(crate) records_committed: IntCounter,
2571 : pub(crate) records_filtered: IntCounter,
2572 : pub(crate) values_committed_metadata_images: IntCounter,
2573 : pub(crate) values_committed_metadata_deltas: IntCounter,
2574 : pub(crate) values_committed_data_images: IntCounter,
2575 : pub(crate) values_committed_data_deltas: IntCounter,
2576 : pub(crate) gap_blocks_zeroed_on_rel_extend: IntCounter,
2577 : }
2578 :
2579 : impl WalIngestMetrics {
2580 0 : pub(crate) fn inc_values_committed(&self, stats: &DatadirModificationStats) {
2581 0 : if stats.metadata_images > 0 {
2582 0 : self.values_committed_metadata_images
2583 0 : .inc_by(stats.metadata_images);
2584 0 : }
2585 0 : if stats.metadata_deltas > 0 {
2586 0 : self.values_committed_metadata_deltas
2587 0 : .inc_by(stats.metadata_deltas);
2588 0 : }
2589 0 : if stats.data_images > 0 {
2590 0 : self.values_committed_data_images.inc_by(stats.data_images);
2591 0 : }
2592 0 : if stats.data_deltas > 0 {
2593 0 : self.values_committed_data_deltas.inc_by(stats.data_deltas);
2594 0 : }
2595 0 : }
2596 : }
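 :
 : // Editorial sketch (not part of the original source): ingest code is expected to pass the
 : // per-commit stats straight through; only the non-zero classes touch a counter.
 : // `modification_stats` is a hypothetical DatadirModificationStats binding.
 : //
 : //     WAL_INGEST.inc_values_committed(&modification_stats);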
2597 :
2598 20 : pub(crate) static WAL_INGEST: Lazy<WalIngestMetrics> = Lazy::new(|| {
2599 20 : let values_committed = register_int_counter_vec!(
2600 20 : "pageserver_wal_ingest_values_committed",
2601 20 : "Number of values committed to pageserver storage from WAL records",
2602 20 : &["class", "kind"],
2603 20 : )
2604 20 : .expect("failed to define a metric");
2605 20 :
2606 20 : WalIngestMetrics {
2607 20 : bytes_received: register_int_counter!(
2608 20 : "pageserver_wal_ingest_bytes_received",
2609 20 : "Bytes of WAL ingested from safekeepers",
2610 20 : )
2611 20 : .unwrap(),
2612 20 : records_received: register_int_counter!(
2613 20 : "pageserver_wal_ingest_records_received",
2614 20 : "Number of WAL records received from safekeepers"
2615 20 : )
2616 20 : .expect("failed to define a metric"),
2617 20 : records_observed: register_int_counter!(
2618 20 : "pageserver_wal_ingest_records_observed",
2619 20 : "Number of WAL records observed from safekeepers. These are metadata only records for shard 0."
2620 20 : )
2621 20 : .expect("failed to define a metric"),
2622 20 : records_committed: register_int_counter!(
2623 20 : "pageserver_wal_ingest_records_committed",
2624 20 : "Number of WAL records which resulted in writes to pageserver storage"
2625 20 : )
2626 20 : .expect("failed to define a metric"),
2627 20 : records_filtered: register_int_counter!(
2628 20 : "pageserver_wal_ingest_records_filtered",
2629 20 : "Number of WAL records filtered out due to sharding"
2630 20 : )
2631 20 : .expect("failed to define a metric"),
2632 20 : values_committed_metadata_images: values_committed.with_label_values(&["metadata", "image"]),
2633 20 : values_committed_metadata_deltas: values_committed.with_label_values(&["metadata", "delta"]),
2634 20 : values_committed_data_images: values_committed.with_label_values(&["data", "image"]),
2635 20 : values_committed_data_deltas: values_committed.with_label_values(&["data", "delta"]),
2636 20 : gap_blocks_zeroed_on_rel_extend: register_int_counter!(
2637 20 : "pageserver_gap_blocks_zeroed_on_rel_extend",
2638 20 : "Total number of zero gap blocks written on relation extends"
2639 20 : )
2640 20 : .expect("failed to define a metric"),
2641 20 : }
2642 20 : });
2643 :
2644 412 : pub(crate) static PAGESERVER_TIMELINE_WAL_RECORDS_RECEIVED: Lazy<IntCounterVec> = Lazy::new(|| {
2645 412 : register_int_counter_vec!(
2646 412 : "pageserver_timeline_wal_records_received",
2647 412 : "Number of WAL records received per shard",
2648 412 : &["tenant_id", "shard_id", "timeline_id"]
2649 412 : )
2650 412 : .expect("failed to define a metric")
2651 412 : });
2652 :
2653 12 : pub(crate) static WAL_REDO_TIME: Lazy<Histogram> = Lazy::new(|| {
2654 12 : register_histogram!(
2655 12 : "pageserver_wal_redo_seconds",
2656 12 : "Time spent on WAL redo",
2657 12 : redo_histogram_time_buckets!()
2658 12 : )
2659 12 : .expect("failed to define a metric")
2660 12 : });
2661 :
2662 12 : pub(crate) static WAL_REDO_RECORDS_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
2663 12 : register_histogram!(
2664 12 : "pageserver_wal_redo_records_histogram",
2665 12 : "Histogram of number of records replayed per redo in the Postgres WAL redo process",
2666 12 : redo_histogram_count_buckets!(),
2667 12 : )
2668 12 : .expect("failed to define a metric")
2669 12 : });
2670 :
2671 12 : pub(crate) static WAL_REDO_BYTES_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
2672 12 : register_histogram!(
2673 12 : "pageserver_wal_redo_bytes_histogram",
2674 12 : "Histogram of number of records replayed per redo sent to Postgres",
2675 12 : redo_bytes_histogram_count_buckets!(),
2676 12 : )
2677 12 : .expect("failed to define a metric")
2678 12 : });
2679 :
2680 : // FIXME: isn't this already included by WAL_REDO_RECORDS_HISTOGRAM which has _count?
2681 12 : pub(crate) static WAL_REDO_RECORD_COUNTER: Lazy<IntCounter> = Lazy::new(|| {
2682 12 : register_int_counter!(
2683 12 : "pageserver_replayed_wal_records_total",
2684 12 : "Number of WAL records replayed in WAL redo process"
2685 12 : )
2686 12 : .unwrap()
2687 12 : });
2688 :
2689 : #[rustfmt::skip]
2690 16 : pub(crate) static WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
2691 16 : register_histogram!(
2692 16 : "pageserver_wal_redo_process_launch_duration",
2693 16 : "Histogram of the duration of successful WalRedoProcess::launch calls",
2694 16 : vec![
2695 16 : 0.0002, 0.0004, 0.0006, 0.0008, 0.0010,
2696 16 : 0.0020, 0.0040, 0.0060, 0.0080, 0.0100,
2697 16 : 0.0200, 0.0400, 0.0600, 0.0800, 0.1000,
2698 16 : 0.2000, 0.4000, 0.6000, 0.8000, 1.0000,
2699 16 : 1.5000, 2.0000, 2.5000, 3.0000, 4.0000, 10.0000
2700 16 : ],
2701 16 : )
2702 16 : .expect("failed to define a metric")
2703 16 : });
2704 :
2705 : pub(crate) struct WalRedoProcessCounters {
2706 : pub(crate) started: IntCounter,
2707 : pub(crate) killed_by_cause: EnumMap<WalRedoKillCause, IntCounter>,
2708 : pub(crate) active_stderr_logger_tasks_started: IntCounter,
2709 : pub(crate) active_stderr_logger_tasks_finished: IntCounter,
2710 : }
2711 :
2712 : #[derive(Debug, enum_map::Enum, strum_macros::IntoStaticStr)]
2713 : pub(crate) enum WalRedoKillCause {
2714 : WalRedoProcessDrop,
2715 : NoLeakChildDrop,
2716 : Startup,
2717 : }
2718 :
2719 : impl Default for WalRedoProcessCounters {
2720 16 : fn default() -> Self {
2721 16 : let started = register_int_counter!(
2722 16 : "pageserver_wal_redo_process_started_total",
2723 16 : "Number of WAL redo processes started",
2724 16 : )
2725 16 : .unwrap();
2726 16 :
2727 16 : let killed = register_int_counter_vec!(
2728 16 : "pageserver_wal_redo_process_stopped_total",
2729 16 : "Number of WAL redo processes stopped",
2730 16 : &["cause"],
2731 16 : )
2732 16 : .unwrap();
2733 16 :
2734 16 : let active_stderr_logger_tasks_started = register_int_counter!(
2735 16 : "pageserver_walredo_stderr_logger_tasks_started_total",
2736 16 : "Number of active walredo stderr logger tasks that have started",
2737 16 : )
2738 16 : .unwrap();
2739 16 :
2740 16 : let active_stderr_logger_tasks_finished = register_int_counter!(
2741 16 : "pageserver_walredo_stderr_logger_tasks_finished_total",
2742 16 : "Number of active walredo stderr logger tasks that have finished",
2743 16 : )
2744 16 : .unwrap();
2745 16 :
2746 16 : Self {
2747 16 : started,
2748 48 : killed_by_cause: EnumMap::from_array(std::array::from_fn(|i| {
2749 48 : let cause = WalRedoKillCause::from_usize(i);
2750 48 : let cause_str: &'static str = cause.into();
2751 48 : killed.with_label_values(&[cause_str])
2752 48 : })),
2753 16 : active_stderr_logger_tasks_started,
2754 16 : active_stderr_logger_tasks_finished,
2755 16 : }
2756 16 : }
2757 : }
2758 :
2759 : pub(crate) static WAL_REDO_PROCESS_COUNTERS: Lazy<WalRedoProcessCounters> =
2760 : Lazy::new(WalRedoProcessCounters::default);
2761 :
2762 : /// Similar to `prometheus::HistogramTimer` but does not record on drop.
2763 : pub(crate) struct StorageTimeMetricsTimer {
2764 : metrics: StorageTimeMetrics,
2765 : start: Instant,
2766 : }
2767 :
2768 : impl StorageTimeMetricsTimer {
2769 4254 : fn new(metrics: StorageTimeMetrics) -> Self {
2770 4254 : Self {
2771 4254 : metrics,
2772 4254 : start: Instant::now(),
2773 4254 : }
2774 4254 : }
2775 :
2776 : /// Returns the elapsed duration of the timer.
2777 4254 : pub fn elapsed(&self) -> Duration {
2778 4254 : self.start.elapsed()
2779 4254 : }
2780 :
2781 : /// Record the time from creation to now and return it.
2782 4254 : pub fn stop_and_record(self) -> Duration {
2783 4254 : let duration = self.elapsed();
2784 4254 : let seconds = duration.as_secs_f64();
2785 4254 : self.metrics.timeline_sum.inc_by(seconds);
2786 4254 : self.metrics.timeline_count.inc();
2787 4254 : self.metrics.global_histogram.observe(seconds);
2788 4254 : duration
2789 4254 : }
2790 :
2791 : /// Turns this timer into one that always records -- usually this means recording
2792 : /// even if an early `?` return path was taken in the function.
2793 8 : pub(crate) fn record_on_drop(self) -> AlwaysRecordingStorageTimeMetricsTimer {
2794 8 : AlwaysRecordingStorageTimeMetricsTimer(Some(self))
2795 8 : }
2796 : }
2797 :
2798 : pub(crate) struct AlwaysRecordingStorageTimeMetricsTimer(Option<StorageTimeMetricsTimer>);
2799 :
2800 : impl Drop for AlwaysRecordingStorageTimeMetricsTimer {
2801 8 : fn drop(&mut self) {
2802 8 : if let Some(inner) = self.0.take() {
2803 8 : inner.stop_and_record();
2804 8 : }
2805 8 : }
2806 : }
2807 :
2808 : impl AlwaysRecordingStorageTimeMetricsTimer {
2809 : /// Returns the elapsed duration of the timer.
2810 0 : pub fn elapsed(&self) -> Duration {
2811 0 : self.0.as_ref().expect("not dropped yet").elapsed()
2812 0 : }
2813 : }
2814 :
2815 : /// Timing facilities for a globally histogrammed metric, which is supplemented by a per-tenant
2816 : /// and per-timeline total sum and count.
2817 : #[derive(Clone, Debug)]
2818 : pub(crate) struct StorageTimeMetrics {
2819 : /// Sum of f64 seconds, per operation, tenant_id and timeline_id
2820 : timeline_sum: Counter,
2821 : /// Number of operations, per operation, tenant_id and timeline_id
2822 : timeline_count: IntCounter,
2823 : /// Global histogram having only the "operation" label.
2824 : global_histogram: Histogram,
2825 : }
2826 :
2827 : impl StorageTimeMetrics {
2828 8136 : pub fn new(
2829 8136 : operation: StorageTimeOperation,
2830 8136 : tenant_id: &str,
2831 8136 : shard_id: &str,
2832 8136 : timeline_id: &str,
2833 8136 : ) -> Self {
2834 8136 : let operation: &'static str = operation.into();
2835 8136 :
2836 8136 : let timeline_sum = STORAGE_TIME_SUM_PER_TIMELINE
2837 8136 : .get_metric_with_label_values(&[operation, tenant_id, shard_id, timeline_id])
2838 8136 : .unwrap();
2839 8136 : let timeline_count = STORAGE_TIME_COUNT_PER_TIMELINE
2840 8136 : .get_metric_with_label_values(&[operation, tenant_id, shard_id, timeline_id])
2841 8136 : .unwrap();
2842 8136 : let global_histogram = STORAGE_TIME_GLOBAL
2843 8136 : .get_metric_with_label_values(&[operation])
2844 8136 : .unwrap();
2845 8136 :
2846 8136 : StorageTimeMetrics {
2847 8136 : timeline_sum,
2848 8136 : timeline_count,
2849 8136 : global_histogram,
2850 8136 : }
2851 8136 : }
2852 :
2853 : /// Starts timing a new operation.
2854 : ///
2855 : /// Note: unlike `prometheus::HistogramTimer` the returned timer does not record on drop.
2856 4254 : pub fn start_timer(&self) -> StorageTimeMetricsTimer {
2857 4254 : StorageTimeMetricsTimer::new(self.clone())
2858 4254 : }
2859 : }
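 :
 : // Editorial sketch (not part of the original source): the two timer flavours above differ
 : // only in drop behaviour (hypothetical `metrics` binding of type StorageTimeMetrics):
 : //
 : //     let timer = metrics.start_timer();
 : //     // ... do the work; nothing is recorded if `timer` is dropped before this point ...
 : //     let _elapsed = timer.stop_and_record();
 : //
 : //     // or, to record even on an early `?` return:
 : //     let _guard = metrics.start_timer().record_on_drop();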
2860 :
2861 : pub(crate) struct TimelineMetrics {
2862 : tenant_id: String,
2863 : shard_id: String,
2864 : timeline_id: String,
2865 : pub flush_time_histo: StorageTimeMetrics,
2866 : pub flush_delay_histo: StorageTimeMetrics,
2867 : pub flush_wait_upload_time_gauge: Gauge,
2868 : pub compact_time_histo: StorageTimeMetrics,
2869 : pub create_images_time_histo: StorageTimeMetrics,
2870 : pub logical_size_histo: StorageTimeMetrics,
2871 : pub imitate_logical_size_histo: StorageTimeMetrics,
2872 : pub load_layer_map_histo: StorageTimeMetrics,
2873 : pub garbage_collect_histo: StorageTimeMetrics,
2874 : pub find_gc_cutoffs_histo: StorageTimeMetrics,
2875 : pub last_record_lsn_gauge: IntGauge,
2876 : pub disk_consistent_lsn_gauge: IntGauge,
2877 : pub pitr_history_size: UIntGauge,
2878 : pub archival_size: UIntGauge,
2879 : pub layers_per_read: Histogram,
2880 : pub standby_horizon_gauge: IntGauge,
2881 : pub resident_physical_size_gauge: UIntGauge,
2882 : pub visible_physical_size_gauge: UIntGauge,
2883 : /// copy of LayeredTimeline.current_logical_size
2884 : pub current_logical_size_gauge: UIntGauge,
2885 : pub aux_file_size_gauge: IntGauge,
2886 : pub directory_entries_count_gauge: Lazy<UIntGauge, Box<dyn Send + Fn() -> UIntGauge>>,
2887 : pub evictions: IntCounter,
2888 : pub evictions_with_low_residence_duration: std::sync::RwLock<EvictionsWithLowResidenceDuration>,
2889 : /// Number of valid LSN leases.
2890 : pub valid_lsn_lease_count_gauge: UIntGauge,
2891 : pub wal_records_received: IntCounter,
2892 : pub storage_io_size: StorageIoSizeMetrics,
2893 : pub wait_lsn_in_progress_micros: GlobalAndPerTenantIntCounter,
2894 : pub wait_lsn_start_finish_counterpair: IntCounterPair,
2895 : shutdown: std::sync::atomic::AtomicBool,
2896 : }
2897 :
2898 : impl TimelineMetrics {
2899 904 : pub fn new(
2900 904 : tenant_shard_id: &TenantShardId,
2901 904 : timeline_id_raw: &TimelineId,
2902 904 : evictions_with_low_residence_duration_builder: EvictionsWithLowResidenceDurationBuilder,
2903 904 : ) -> Self {
2904 904 : let tenant_id = tenant_shard_id.tenant_id.to_string();
2905 904 : let shard_id = format!("{}", tenant_shard_id.shard_slug());
2906 904 : let timeline_id = timeline_id_raw.to_string();
2907 904 : let flush_time_histo = StorageTimeMetrics::new(
2908 904 : StorageTimeOperation::LayerFlush,
2909 904 : &tenant_id,
2910 904 : &shard_id,
2911 904 : &timeline_id,
2912 904 : );
2913 904 : let flush_delay_histo = StorageTimeMetrics::new(
2914 904 : StorageTimeOperation::LayerFlushDelay,
2915 904 : &tenant_id,
2916 904 : &shard_id,
2917 904 : &timeline_id,
2918 904 : );
2919 904 : let flush_wait_upload_time_gauge = FLUSH_WAIT_UPLOAD_TIME
2920 904 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2921 904 : .unwrap();
2922 904 : let compact_time_histo = StorageTimeMetrics::new(
2923 904 : StorageTimeOperation::Compact,
2924 904 : &tenant_id,
2925 904 : &shard_id,
2926 904 : &timeline_id,
2927 904 : );
2928 904 : let create_images_time_histo = StorageTimeMetrics::new(
2929 904 : StorageTimeOperation::CreateImages,
2930 904 : &tenant_id,
2931 904 : &shard_id,
2932 904 : &timeline_id,
2933 904 : );
2934 904 : let logical_size_histo = StorageTimeMetrics::new(
2935 904 : StorageTimeOperation::LogicalSize,
2936 904 : &tenant_id,
2937 904 : &shard_id,
2938 904 : &timeline_id,
2939 904 : );
2940 904 : let imitate_logical_size_histo = StorageTimeMetrics::new(
2941 904 : StorageTimeOperation::ImitateLogicalSize,
2942 904 : &tenant_id,
2943 904 : &shard_id,
2944 904 : &timeline_id,
2945 904 : );
2946 904 : let load_layer_map_histo = StorageTimeMetrics::new(
2947 904 : StorageTimeOperation::LoadLayerMap,
2948 904 : &tenant_id,
2949 904 : &shard_id,
2950 904 : &timeline_id,
2951 904 : );
2952 904 : let garbage_collect_histo = StorageTimeMetrics::new(
2953 904 : StorageTimeOperation::Gc,
2954 904 : &tenant_id,
2955 904 : &shard_id,
2956 904 : &timeline_id,
2957 904 : );
2958 904 : let find_gc_cutoffs_histo = StorageTimeMetrics::new(
2959 904 : StorageTimeOperation::FindGcCutoffs,
2960 904 : &tenant_id,
2961 904 : &shard_id,
2962 904 : &timeline_id,
2963 904 : );
2964 904 : let last_record_lsn_gauge = LAST_RECORD_LSN
2965 904 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2966 904 : .unwrap();
2967 904 :
2968 904 : let disk_consistent_lsn_gauge = DISK_CONSISTENT_LSN
2969 904 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2970 904 : .unwrap();
2971 904 :
2972 904 : let pitr_history_size = PITR_HISTORY_SIZE
2973 904 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2974 904 : .unwrap();
2975 904 :
2976 904 : let archival_size = TIMELINE_ARCHIVE_SIZE
2977 904 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2978 904 : .unwrap();
2979 904 :
2980 904 : let layers_per_read = LAYERS_PER_READ
2981 904 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2982 904 : .unwrap();
2983 904 :
2984 904 : let standby_horizon_gauge = STANDBY_HORIZON
2985 904 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2986 904 : .unwrap();
2987 904 : let resident_physical_size_gauge = RESIDENT_PHYSICAL_SIZE
2988 904 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2989 904 : .unwrap();
2990 904 : let visible_physical_size_gauge = VISIBLE_PHYSICAL_SIZE
2991 904 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2992 904 : .unwrap();
2993 904 : // TODO: we shouldn't expose this metric
2994 904 : let current_logical_size_gauge = CURRENT_LOGICAL_SIZE
2995 904 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2996 904 : .unwrap();
2997 904 : let aux_file_size_gauge = AUX_FILE_SIZE
2998 904 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2999 904 : .unwrap();
3000 904 : // TODO: use impl Trait syntax here once we have the ability to use it: https://github.com/rust-lang/rust/issues/63065
3001 904 : let directory_entries_count_gauge_closure = {
3002 904 : let tenant_shard_id = *tenant_shard_id;
3003 904 : let timeline_id_raw = *timeline_id_raw;
3004 0 : move || {
3005 0 : let tenant_id = tenant_shard_id.tenant_id.to_string();
3006 0 : let shard_id = format!("{}", tenant_shard_id.shard_slug());
3007 0 : let timeline_id = timeline_id_raw.to_string();
3008 0 : let gauge: UIntGauge = DIRECTORY_ENTRIES_COUNT
3009 0 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3010 0 : .unwrap();
3011 0 : gauge
3012 0 : }
3013 : };
3014 904 : let directory_entries_count_gauge: Lazy<UIntGauge, Box<dyn Send + Fn() -> UIntGauge>> =
3015 904 : Lazy::new(Box::new(directory_entries_count_gauge_closure));
3016 904 : let evictions = EVICTIONS
3017 904 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3018 904 : .unwrap();
3019 904 : let evictions_with_low_residence_duration = evictions_with_low_residence_duration_builder
3020 904 : .build(&tenant_id, &shard_id, &timeline_id);
3021 904 :
3022 904 : let valid_lsn_lease_count_gauge = VALID_LSN_LEASE_COUNT
3023 904 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3024 904 : .unwrap();
3025 904 :
3026 904 : let wal_records_received = PAGESERVER_TIMELINE_WAL_RECORDS_RECEIVED
3027 904 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3028 904 : .unwrap();
3029 904 :
3030 904 : let storage_io_size = StorageIoSizeMetrics::new(&tenant_id, &shard_id, &timeline_id);
3031 904 :
3032 904 : let wait_lsn_in_progress_micros = GlobalAndPerTenantIntCounter {
3033 904 : global: WAIT_LSN_IN_PROGRESS_GLOBAL_MICROS.clone(),
3034 904 : per_tenant: WAIT_LSN_IN_PROGRESS_MICROS
3035 904 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3036 904 : .unwrap(),
3037 904 : };
3038 904 :
3039 904 : let wait_lsn_start_finish_counterpair = WAIT_LSN_START_FINISH_COUNTERPAIR
3040 904 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3041 904 : .unwrap();
3042 904 :
3043 904 : TimelineMetrics {
3044 904 : tenant_id,
3045 904 : shard_id,
3046 904 : timeline_id,
3047 904 : flush_time_histo,
3048 904 : flush_delay_histo,
3049 904 : flush_wait_upload_time_gauge,
3050 904 : compact_time_histo,
3051 904 : create_images_time_histo,
3052 904 : logical_size_histo,
3053 904 : imitate_logical_size_histo,
3054 904 : garbage_collect_histo,
3055 904 : find_gc_cutoffs_histo,
3056 904 : load_layer_map_histo,
3057 904 : last_record_lsn_gauge,
3058 904 : disk_consistent_lsn_gauge,
3059 904 : pitr_history_size,
3060 904 : archival_size,
3061 904 : layers_per_read,
3062 904 : standby_horizon_gauge,
3063 904 : resident_physical_size_gauge,
3064 904 : visible_physical_size_gauge,
3065 904 : current_logical_size_gauge,
3066 904 : aux_file_size_gauge,
3067 904 : directory_entries_count_gauge,
3068 904 : evictions,
3069 904 : evictions_with_low_residence_duration: std::sync::RwLock::new(
3070 904 : evictions_with_low_residence_duration,
3071 904 : ),
3072 904 : storage_io_size,
3073 904 : valid_lsn_lease_count_gauge,
3074 904 : wal_records_received,
3075 904 : wait_lsn_in_progress_micros,
3076 904 : wait_lsn_start_finish_counterpair,
3077 904 : shutdown: std::sync::atomic::AtomicBool::default(),
3078 904 : }
3079 904 : }
3080 :
3081 3156 : pub(crate) fn record_new_file_metrics(&self, sz: u64) {
3082 3156 : self.resident_physical_size_add(sz);
3083 3156 : }
3084 :
3085 1089 : pub(crate) fn resident_physical_size_sub(&self, sz: u64) {
3086 1089 : self.resident_physical_size_gauge.sub(sz);
3087 1089 : crate::metrics::RESIDENT_PHYSICAL_SIZE_GLOBAL.sub(sz);
3088 1089 : }
3089 :
3090 3428 : pub(crate) fn resident_physical_size_add(&self, sz: u64) {
3091 3428 : self.resident_physical_size_gauge.add(sz);
3092 3428 : crate::metrics::RESIDENT_PHYSICAL_SIZE_GLOBAL.add(sz);
3093 3428 : }
3094 :
3095 20 : pub(crate) fn resident_physical_size_get(&self) -> u64 {
3096 20 : self.resident_physical_size_gauge.get()
3097 20 : }
3098 :
3099 0 : pub(crate) fn flush_wait_upload_time_gauge_add(&self, duration: f64) {
3100 0 : self.flush_wait_upload_time_gauge.add(duration);
3101 0 : crate::metrics::FLUSH_WAIT_UPLOAD_TIME
3102 0 : .get_metric_with_label_values(&[&self.tenant_id, &self.shard_id, &self.timeline_id])
3103 0 : .unwrap()
3104 0 : .add(duration);
3105 0 : }
3106 :
3107 : /// Generates TIMELINE_LAYER labels for a persistent layer.
3108 5247 : fn make_layer_labels(&self, layer_desc: &PersistentLayerDesc) -> [&str; 5] {
3109 5247 : let level = match LayerMap::is_l0(&layer_desc.key_range, layer_desc.is_delta()) {
3110 2847 : true => LayerLevel::L0,
3111 2400 : false => LayerLevel::L1,
3112 : };
3113 5247 : let kind = match layer_desc.is_delta() {
3114 4363 : true => LayerKind::Delta,
3115 884 : false => LayerKind::Image,
3116 : };
3117 5247 : [
3118 5247 : &self.tenant_id,
3119 5247 : &self.shard_id,
3120 5247 : &self.timeline_id,
3121 5247 : level.into(),
3122 5247 : kind.into(),
3123 5247 : ]
3124 5247 : }
3125 :
3126 : /// Generates TIMELINE_LAYER labels for a frozen ephemeral layer.
3127 4712 : fn make_frozen_layer_labels(&self, _layer: &InMemoryLayer) -> [&str; 5] {
3128 4712 : [
3129 4712 : &self.tenant_id,
3130 4712 : &self.shard_id,
3131 4712 : &self.timeline_id,
3132 4712 : LayerLevel::Frozen.into(),
3133 4712 : LayerKind::Delta.into(), // by definition
3134 4712 : ]
3135 4712 : }
3136 :
3137 : /// Removes a frozen ephemeral layer from TIMELINE_LAYER metrics.
3138 2356 : pub fn dec_frozen_layer(&self, layer: &InMemoryLayer) {
3139 2356 : assert!(matches!(layer.info(), InMemoryLayerInfo::Frozen { .. }));
3140 2356 : let labels = self.make_frozen_layer_labels(layer);
3141 2356 : let size = layer.try_len().expect("frozen layer should have no writer");
3142 2356 : TIMELINE_LAYER_COUNT
3143 2356 : .get_metric_with_label_values(&labels)
3144 2356 : .unwrap()
3145 2356 : .dec();
3146 2356 : TIMELINE_LAYER_SIZE
3147 2356 : .get_metric_with_label_values(&labels)
3148 2356 : .unwrap()
3149 2356 : .sub(size);
3150 2356 : }
3151 :
3152 : /// Adds a frozen ephemeral layer to TIMELINE_LAYER metrics.
3153 2356 : pub fn inc_frozen_layer(&self, layer: &InMemoryLayer) {
3154 2356 : assert!(matches!(layer.info(), InMemoryLayerInfo::Frozen { .. }));
3155 2356 : let labels = self.make_frozen_layer_labels(layer);
3156 2356 : let size = layer.try_len().expect("frozen layer should have no writer");
3157 2356 : TIMELINE_LAYER_COUNT
3158 2356 : .get_metric_with_label_values(&labels)
3159 2356 : .unwrap()
3160 2356 : .inc();
3161 2356 : TIMELINE_LAYER_SIZE
3162 2356 : .get_metric_with_label_values(&labels)
3163 2356 : .unwrap()
3164 2356 : .add(size);
3165 2356 : }
3166 :
3167 : /// Removes a persistent layer from TIMELINE_LAYER metrics.
3168 1387 : pub fn dec_layer(&self, layer_desc: &PersistentLayerDesc) {
3169 1387 : let labels = self.make_layer_labels(layer_desc);
3170 1387 : TIMELINE_LAYER_COUNT
3171 1387 : .get_metric_with_label_values(&labels)
3172 1387 : .unwrap()
3173 1387 : .dec();
3174 1387 : TIMELINE_LAYER_SIZE
3175 1387 : .get_metric_with_label_values(&labels)
3176 1387 : .unwrap()
3177 1387 : .sub(layer_desc.file_size);
3178 1387 : }
3179 :
3180 : /// Adds a persistent layer to TIMELINE_LAYER metrics.
3181 3860 : pub fn inc_layer(&self, layer_desc: &PersistentLayerDesc) {
3182 3860 : let labels = self.make_layer_labels(layer_desc);
3183 3860 : TIMELINE_LAYER_COUNT
3184 3860 : .get_metric_with_label_values(&labels)
3185 3860 : .unwrap()
3186 3860 : .inc();
3187 3860 : TIMELINE_LAYER_SIZE
3188 3860 : .get_metric_with_label_values(&labels)
3189 3860 : .unwrap()
3190 3860 : .add(layer_desc.file_size);
3191 3860 : }
3192 :
3193 20 : pub(crate) fn shutdown(&self) {
3194 20 : let was_shutdown = self
3195 20 : .shutdown
3196 20 : .swap(true, std::sync::atomic::Ordering::Relaxed);
3197 20 :
3198 20 : if was_shutdown {
3199 : // This happens on tenant deletion because the tenant first shuts down its timelines, then
3200 : // invokes timeline deletion, which first shuts down the timeline again.
3201 : // TODO: this can be removed once https://github.com/neondatabase/neon/issues/5080 is resolved.
3202 0 : return;
3203 20 : }
3204 20 :
3205 20 : let tenant_id = &self.tenant_id;
3206 20 : let timeline_id = &self.timeline_id;
3207 20 : let shard_id = &self.shard_id;
3208 20 : let _ = LAST_RECORD_LSN.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3209 20 : let _ = DISK_CONSISTENT_LSN.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3210 20 : let _ = FLUSH_WAIT_UPLOAD_TIME.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3211 20 : let _ = STANDBY_HORIZON.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3212 20 : {
3213 20 : RESIDENT_PHYSICAL_SIZE_GLOBAL.sub(self.resident_physical_size_get());
3214 20 : let _ = RESIDENT_PHYSICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3215 20 : }
3216 20 : let _ = VISIBLE_PHYSICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3217 20 : let _ = CURRENT_LOGICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3218 20 : if let Some(metric) = Lazy::get(&DIRECTORY_ENTRIES_COUNT) {
3219 0 : let _ = metric.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3220 20 : }
3221 :
3222 20 : let _ = TIMELINE_ARCHIVE_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3223 20 : let _ = PITR_HISTORY_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3224 :
3225 80 : for ref level in LayerLevel::iter() {
3226 180 : for ref kind in LayerKind::iter() {
3227 120 : let labels: [&str; 5] =
3228 120 : [tenant_id, shard_id, timeline_id, level.into(), kind.into()];
3229 120 : let _ = TIMELINE_LAYER_SIZE.remove_label_values(&labels);
3230 120 : let _ = TIMELINE_LAYER_COUNT.remove_label_values(&labels);
3231 120 : }
3232 : }
3233 :
3234 20 : let _ = LAYERS_PER_READ.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3235 20 :
3236 20 : let _ = EVICTIONS.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3237 20 : let _ = AUX_FILE_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3238 20 : let _ = VALID_LSN_LEASE_COUNT.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3239 20 :
3240 20 : self.evictions_with_low_residence_duration
3241 20 : .write()
3242 20 : .unwrap()
3243 20 : .remove(tenant_id, shard_id, timeline_id);
3244 :
3245 : // The following metrics are born outside of the TimelineMetrics lifecycle but are still
3246 : // removed at the end of it. The idea is that the metrics outlive the
3247 : // entity during whose lifetime they are observed: e.g., the smgr metrics
3248 : // outlive an individual smgr connection, but not the timeline.
3249 :
3250 200 : for op in StorageTimeOperation::VARIANTS {
3251 180 : let _ = STORAGE_TIME_SUM_PER_TIMELINE.remove_label_values(&[
3252 180 : op,
3253 180 : tenant_id,
3254 180 : shard_id,
3255 180 : timeline_id,
3256 180 : ]);
3257 180 : let _ = STORAGE_TIME_COUNT_PER_TIMELINE.remove_label_values(&[
3258 180 : op,
3259 180 : tenant_id,
3260 180 : shard_id,
3261 180 : timeline_id,
3262 180 : ]);
3263 180 : }
3264 :
3265 60 : for op in StorageIoSizeOperation::VARIANTS {
3266 40 : let _ = STORAGE_IO_SIZE.remove_label_values(&[op, tenant_id, shard_id, timeline_id]);
3267 40 : }
3268 :
3269 : let _ =
3270 20 : WAIT_LSN_IN_PROGRESS_MICROS.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3271 20 :
3272 20 : {
3273 20 : let mut res = [Ok(()), Ok(())];
3274 20 : WAIT_LSN_START_FINISH_COUNTERPAIR
3275 20 : .remove_label_values(&mut res, &[tenant_id, shard_id, timeline_id]);
3276 20 : }
3277 20 :
3278 20 : let _ = SMGR_QUERY_STARTED_PER_TENANT_TIMELINE.remove_label_values(&[
3279 20 : SmgrQueryType::GetPageAtLsn.into(),
3280 20 : tenant_id,
3281 20 : shard_id,
3282 20 : timeline_id,
3283 20 : ]);
3284 20 : let _ = SMGR_QUERY_TIME_PER_TENANT_TIMELINE.remove_label_values(&[
3285 20 : SmgrQueryType::GetPageAtLsn.into(),
3286 20 : tenant_id,
3287 20 : shard_id,
3288 20 : timeline_id,
3289 20 : ]);
3290 20 : let _ = PAGE_SERVICE_BATCH_SIZE_PER_TENANT_TIMELINE.remove_label_values(&[
3291 20 : tenant_id,
3292 20 : shard_id,
3293 20 : timeline_id,
3294 20 : ]);
3295 20 : let _ = PAGESERVER_TIMELINE_WAL_RECORDS_RECEIVED.remove_label_values(&[
3296 20 : tenant_id,
3297 20 : shard_id,
3298 20 : timeline_id,
3299 20 : ]);
3300 20 : let _ = PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS.remove_label_values(&[
3301 20 : tenant_id,
3302 20 : shard_id,
3303 20 : timeline_id,
3304 20 : ]);
3305 20 : let _ = PAGE_SERVICE_SMGR_BATCH_WAIT_TIME.remove_label_values(&[
3306 20 : tenant_id,
3307 20 : shard_id,
3308 20 : timeline_id,
3309 20 : ]);
3310 20 : }
3311 : }
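// A minimal sketch of how the layer accounting helpers above are intended to
// be used in pairs, so that TIMELINE_LAYER_COUNT / TIMELINE_LAYER_SIZE and the
// resident-size gauges stay balanced over a layer's lifetime. The call site is
// hypothetical; the real callers live in the timeline and layer-management code.
#[allow(dead_code)]
fn example_layer_accounting(metrics: &TimelineMetrics, desc: &PersistentLayerDesc) {
    // a new persistent layer was written to local disk
    metrics.inc_layer(desc);
    metrics.record_new_file_metrics(desc.file_size);

    // ... later, the layer is deleted or evicted from local disk ...
    metrics.dec_layer(desc);
    metrics.resident_physical_size_sub(desc.file_size);
}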
3312 :
3313 12 : pub(crate) fn remove_tenant_metrics(tenant_shard_id: &TenantShardId) {
3314 12 : // Only shard zero deals in synthetic sizes
3315 12 : if tenant_shard_id.is_shard_zero() {
3316 12 : let tid = tenant_shard_id.tenant_id.to_string();
3317 12 : let _ = TENANT_SYNTHETIC_SIZE_METRIC.remove_label_values(&[&tid]);
3318 12 : }
3319 :
3320 12 : tenant_throttling::remove_tenant_metrics(tenant_shard_id);
3321 12 :
3322 12 : // we leave the BROKEN_TENANTS_SET entry if any
3323 12 : }
3324 :
3325 : /// Maintain a per timeline gauge in addition to the global gauge.
3326 : pub(crate) struct PerTimelineRemotePhysicalSizeGauge {
3327 : last_set: AtomicU64,
3328 : gauge: UIntGauge,
3329 : }
3330 :
3331 : impl PerTimelineRemotePhysicalSizeGauge {
3332 924 : fn new(per_timeline_gauge: UIntGauge) -> Self {
3333 924 : Self {
3334 924 : last_set: AtomicU64::new(0),
3335 924 : gauge: per_timeline_gauge,
3336 924 : }
3337 924 : }
3338 3849 : pub(crate) fn set(&self, sz: u64) {
3339 3849 : self.gauge.set(sz);
3340 3849 : let prev = self.last_set.swap(sz, std::sync::atomic::Ordering::Relaxed);
3341 3849 : if sz < prev {
3342 75 : REMOTE_PHYSICAL_SIZE_GLOBAL.sub(prev - sz);
3343 3774 : } else {
3344 3774 : REMOTE_PHYSICAL_SIZE_GLOBAL.add(sz - prev);
3345 3774 : };
3346 3849 : }
3347 4 : pub(crate) fn get(&self) -> u64 {
3348 4 : self.gauge.get()
3349 4 : }
3350 : }
3351 :
3352 : impl Drop for PerTimelineRemotePhysicalSizeGauge {
3353 40 : fn drop(&mut self) {
3354 40 : REMOTE_PHYSICAL_SIZE_GLOBAL.sub(self.last_set.load(std::sync::atomic::Ordering::Relaxed));
3355 40 : }
3356 : }
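// A minimal sketch of the delta bookkeeping above: `set` applies only the
// difference from the previously set value to the global gauge, and `Drop`
// removes the remaining contribution. The values are arbitrary placeholders.
#[allow(dead_code)]
fn example_remote_size_accounting(gauge: &PerTimelineRemotePhysicalSizeGauge) {
    gauge.set(100); // global gauge += 100 (assuming nothing was set before)
    gauge.set(40); // global gauge -= 60 (only the delta is applied)
    // when the owning metrics object is dropped, the remaining 40 is
    // subtracted from the global gauge again
}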
3357 :
3358 : pub(crate) struct RemoteTimelineClientMetrics {
3359 : tenant_id: String,
3360 : shard_id: String,
3361 : timeline_id: String,
3362 : pub(crate) remote_physical_size_gauge: PerTimelineRemotePhysicalSizeGauge,
3363 : calls: Mutex<HashMap<(&'static str, &'static str), IntCounterPair>>,
3364 : bytes_started_counter: Mutex<HashMap<(&'static str, &'static str), IntCounter>>,
3365 : bytes_finished_counter: Mutex<HashMap<(&'static str, &'static str), IntCounter>>,
3366 : pub(crate) projected_remote_consistent_lsn_gauge: UIntGauge,
3367 : }
3368 :
3369 : impl RemoteTimelineClientMetrics {
3370 924 : pub fn new(tenant_shard_id: &TenantShardId, timeline_id: &TimelineId) -> Self {
3371 924 : let tenant_id_str = tenant_shard_id.tenant_id.to_string();
3372 924 : let shard_id_str = format!("{}", tenant_shard_id.shard_slug());
3373 924 : let timeline_id_str = timeline_id.to_string();
3374 924 :
3375 924 : let remote_physical_size_gauge = PerTimelineRemotePhysicalSizeGauge::new(
3376 924 : REMOTE_PHYSICAL_SIZE
3377 924 : .get_metric_with_label_values(&[&tenant_id_str, &shard_id_str, &timeline_id_str])
3378 924 : .unwrap(),
3379 924 : );
3380 924 :
3381 924 : let projected_remote_consistent_lsn_gauge = PROJECTED_REMOTE_CONSISTENT_LSN
3382 924 : .get_metric_with_label_values(&[&tenant_id_str, &shard_id_str, &timeline_id_str])
3383 924 : .unwrap();
3384 924 :
3385 924 : RemoteTimelineClientMetrics {
3386 924 : tenant_id: tenant_id_str,
3387 924 : shard_id: shard_id_str,
3388 924 : timeline_id: timeline_id_str,
3389 924 : calls: Mutex::new(HashMap::default()),
3390 924 : bytes_started_counter: Mutex::new(HashMap::default()),
3391 924 : bytes_finished_counter: Mutex::new(HashMap::default()),
3392 924 : remote_physical_size_gauge,
3393 924 : projected_remote_consistent_lsn_gauge,
3394 924 : }
3395 924 : }
3396 :
3397 6112 : pub fn remote_operation_time(
3398 6112 : &self,
3399 6112 : file_kind: &RemoteOpFileKind,
3400 6112 : op_kind: &RemoteOpKind,
3401 6112 : status: &'static str,
3402 6112 : ) -> Histogram {
3403 6112 : let key = (file_kind.as_str(), op_kind.as_str(), status);
3404 6112 : REMOTE_OPERATION_TIME
3405 6112 : .get_metric_with_label_values(&[key.0, key.1, key.2])
3406 6112 : .unwrap()
3407 6112 : }
3408 :
3409 14382 : fn calls_counter_pair(
3410 14382 : &self,
3411 14382 : file_kind: &RemoteOpFileKind,
3412 14382 : op_kind: &RemoteOpKind,
3413 14382 : ) -> IntCounterPair {
3414 14382 : let mut guard = self.calls.lock().unwrap();
3415 14382 : let key = (file_kind.as_str(), op_kind.as_str());
3416 14382 : let metric = guard.entry(key).or_insert_with(move || {
3417 1663 : REMOTE_TIMELINE_CLIENT_CALLS
3418 1663 : .get_metric_with_label_values(&[
3419 1663 : &self.tenant_id,
3420 1663 : &self.shard_id,
3421 1663 : &self.timeline_id,
3422 1663 : key.0,
3423 1663 : key.1,
3424 1663 : ])
3425 1663 : .unwrap()
3426 14382 : });
3427 14382 : metric.clone()
3428 14382 : }
3429 :
3430 3484 : fn bytes_started_counter(
3431 3484 : &self,
3432 3484 : file_kind: &RemoteOpFileKind,
3433 3484 : op_kind: &RemoteOpKind,
3434 3484 : ) -> IntCounter {
3435 3484 : let mut guard = self.bytes_started_counter.lock().unwrap();
3436 3484 : let key = (file_kind.as_str(), op_kind.as_str());
3437 3484 : let metric = guard.entry(key).or_insert_with(move || {
3438 656 : REMOTE_TIMELINE_CLIENT_BYTES_STARTED_COUNTER
3439 656 : .get_metric_with_label_values(&[
3440 656 : &self.tenant_id,
3441 656 : &self.shard_id,
3442 656 : &self.timeline_id,
3443 656 : key.0,
3444 656 : key.1,
3445 656 : ])
3446 656 : .unwrap()
3447 3484 : });
3448 3484 : metric.clone()
3449 3484 : }
3450 :
3451 6583 : fn bytes_finished_counter(
3452 6583 : &self,
3453 6583 : file_kind: &RemoteOpFileKind,
3454 6583 : op_kind: &RemoteOpKind,
3455 6583 : ) -> IntCounter {
3456 6583 : let mut guard = self.bytes_finished_counter.lock().unwrap();
3457 6583 : let key = (file_kind.as_str(), op_kind.as_str());
3458 6583 : let metric = guard.entry(key).or_insert_with(move || {
3459 656 : REMOTE_TIMELINE_CLIENT_BYTES_FINISHED_COUNTER
3460 656 : .get_metric_with_label_values(&[
3461 656 : &self.tenant_id,
3462 656 : &self.shard_id,
3463 656 : &self.timeline_id,
3464 656 : key.0,
3465 656 : key.1,
3466 656 : ])
3467 656 : .unwrap()
3468 6583 : });
3469 6583 : metric.clone()
3470 6583 : }
3471 : }
3472 :
3473 : #[cfg(test)]
3474 : impl RemoteTimelineClientMetrics {
3475 12 : pub fn get_bytes_started_counter_value(
3476 12 : &self,
3477 12 : file_kind: &RemoteOpFileKind,
3478 12 : op_kind: &RemoteOpKind,
3479 12 : ) -> Option<u64> {
3480 12 : let guard = self.bytes_started_counter.lock().unwrap();
3481 12 : let key = (file_kind.as_str(), op_kind.as_str());
3482 12 : guard.get(&key).map(|counter| counter.get())
3483 12 : }
3484 :
3485 12 : pub fn get_bytes_finished_counter_value(
3486 12 : &self,
3487 12 : file_kind: &RemoteOpFileKind,
3488 12 : op_kind: &RemoteOpKind,
3489 12 : ) -> Option<u64> {
3490 12 : let guard = self.bytes_finished_counter.lock().unwrap();
3491 12 : let key = (file_kind.as_str(), op_kind.as_str());
3492 12 : guard.get(&key).map(|counter| counter.get())
3493 12 : }
3494 : }
3495 :
3496 : /// See [`RemoteTimelineClientMetrics::call_begin`].
3497 : #[must_use]
3498 : pub(crate) struct RemoteTimelineClientCallMetricGuard {
3499 : /// Decremented on drop.
3500 : calls_counter_pair: Option<IntCounterPair>,
3501 : /// If Some(), this references the bytes_finished metric, and we increment it by the given `u64` on drop.
3502 : bytes_finished: Option<(IntCounter, u64)>,
3503 : }
3504 :
3505 : impl RemoteTimelineClientCallMetricGuard {
3506 : /// Consume this guard object without performing the metric updates it would do on `drop()`.
3507 : /// The caller vouches to do the metric updates manually.
3508 7601 : pub fn will_decrement_manually(mut self) {
3509 7601 : let RemoteTimelineClientCallMetricGuard {
3510 7601 : calls_counter_pair,
3511 7601 : bytes_finished,
3512 7601 : } = &mut self;
3513 7601 : calls_counter_pair.take();
3514 7601 : bytes_finished.take();
3515 7601 : }
3516 : }
3517 :
3518 : impl Drop for RemoteTimelineClientCallMetricGuard {
3519 7669 : fn drop(&mut self) {
3520 7669 : let RemoteTimelineClientCallMetricGuard {
3521 7669 : calls_counter_pair,
3522 7669 : bytes_finished,
3523 7669 : } = self;
3524 7669 : if let Some(guard) = calls_counter_pair.take() {
3525 68 : guard.dec();
3526 7601 : }
3527 7669 : if let Some((bytes_finished_metric, value)) = bytes_finished {
3528 0 : bytes_finished_metric.inc_by(*value);
3529 7669 : }
3530 7669 : }
3531 : }
3532 :
3533 : /// The enum variants communicate to the [`RemoteTimelineClientMetrics`] whether to
3534 : /// track the byte size of this call in applicable metric(s).
3535 : pub(crate) enum RemoteTimelineClientMetricsCallTrackSize {
3536 : /// Do not account for this call's byte size in any metrics.
3537 : /// The `reason` field is there to make the call sites self-documenting
3538 : /// about why they don't need the metric.
3539 : DontTrackSize { reason: &'static str },
3540 : /// Track the byte size of the call in applicable metric(s).
3541 : Bytes(u64),
3542 : }
3543 :
3544 : impl RemoteTimelineClientMetrics {
3545 : /// Update the metrics that change when a call to the remote timeline client instance starts.
3546 : ///
3547 : /// Drop the returned guard object once the operation is finished to update the corresponding metrics that track completions.
3548 : /// Or, use [`RemoteTimelineClientCallMetricGuard::will_decrement_manually`] and [`call_end`](Self::call_end) if that
3549 : /// is more suitable.
3550 : /// Never do both.
3551 7669 : pub(crate) fn call_begin(
3552 7669 : &self,
3553 7669 : file_kind: &RemoteOpFileKind,
3554 7669 : op_kind: &RemoteOpKind,
3555 7669 : size: RemoteTimelineClientMetricsCallTrackSize,
3556 7669 : ) -> RemoteTimelineClientCallMetricGuard {
3557 7669 : let calls_counter_pair = self.calls_counter_pair(file_kind, op_kind);
3558 7669 : calls_counter_pair.inc();
3559 :
3560 7669 : let bytes_finished = match size {
3561 4185 : RemoteTimelineClientMetricsCallTrackSize::DontTrackSize { reason: _reason } => {
3562 4185 : // nothing to do
3563 4185 : None
3564 : }
3565 3484 : RemoteTimelineClientMetricsCallTrackSize::Bytes(size) => {
3566 3484 : self.bytes_started_counter(file_kind, op_kind).inc_by(size);
3567 3484 : let finished_counter = self.bytes_finished_counter(file_kind, op_kind);
3568 3484 : Some((finished_counter, size))
3569 : }
3570 : };
3571 7669 : RemoteTimelineClientCallMetricGuard {
3572 7669 : calls_counter_pair: Some(calls_counter_pair),
3573 7669 : bytes_finished,
3574 7669 : }
3575 7669 : }
3576 :
3577 : /// Manually update the metrics that track completions, instead of using the guard object.
3578 : /// Using the guard object is generally preferable.
3579 : /// See [`call_begin`](Self::call_begin) for more context.
3580 6713 : pub(crate) fn call_end(
3581 6713 : &self,
3582 6713 : file_kind: &RemoteOpFileKind,
3583 6713 : op_kind: &RemoteOpKind,
3584 6713 : size: RemoteTimelineClientMetricsCallTrackSize,
3585 6713 : ) {
3586 6713 : let calls_counter_pair = self.calls_counter_pair(file_kind, op_kind);
3587 6713 : calls_counter_pair.dec();
3588 6713 : match size {
3589 3614 : RemoteTimelineClientMetricsCallTrackSize::DontTrackSize { reason: _reason } => {}
3590 3099 : RemoteTimelineClientMetricsCallTrackSize::Bytes(size) => {
3591 3099 : self.bytes_finished_counter(file_kind, op_kind).inc_by(size);
3592 3099 : }
3593 : }
3594 6713 : }
3595 : }
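// A minimal sketch of the two supported usage patterns for call_begin /
// call_end above. `file_kind` and `op_kind` stand in for real
// RemoteOpFileKind / RemoteOpKind values at an actual call site; the byte
// count and reason strings are arbitrary placeholders.
#[allow(dead_code)]
fn example_call_tracking(
    metrics: &RemoteTimelineClientMetrics,
    file_kind: &RemoteOpFileKind,
    op_kind: &RemoteOpKind,
) {
    // Pattern 1: guard-based. Completion metrics are updated when the guard
    // is dropped at the end of the operation.
    {
        let _guard = metrics.call_begin(
            file_kind,
            op_kind,
            RemoteTimelineClientMetricsCallTrackSize::Bytes(1024),
        );
        // ... perform the upload or download here ...
    } // guard drops: calls counter decremented, bytes_finished incremented by 1024

    // Pattern 2: manual. The guard promises not to touch the metrics on drop;
    // the caller must then invoke call_end with matching arguments exactly once.
    let guard = metrics.call_begin(
        file_kind,
        op_kind,
        RemoteTimelineClientMetricsCallTrackSize::DontTrackSize {
            reason: "example: size not known up front",
        },
    );
    guard.will_decrement_manually();
    // ... later, when the operation has completed ...
    metrics.call_end(
        file_kind,
        op_kind,
        RemoteTimelineClientMetricsCallTrackSize::DontTrackSize {
            reason: "example: size not known up front",
        },
    );
}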
3596 :
3597 : impl Drop for RemoteTimelineClientMetrics {
3598 40 : fn drop(&mut self) {
3599 40 : let RemoteTimelineClientMetrics {
3600 40 : tenant_id,
3601 40 : shard_id,
3602 40 : timeline_id,
3603 40 : remote_physical_size_gauge,
3604 40 : calls,
3605 40 : bytes_started_counter,
3606 40 : bytes_finished_counter,
3607 40 : projected_remote_consistent_lsn_gauge,
3608 40 : } = self;
3609 48 : for ((a, b), _) in calls.get_mut().unwrap().drain() {
3610 48 : let mut res = [Ok(()), Ok(())];
3611 48 : REMOTE_TIMELINE_CLIENT_CALLS
3612 48 : .remove_label_values(&mut res, &[tenant_id, shard_id, timeline_id, a, b]);
3613 48 : // don't care about results
3614 48 : }
3615 40 : for ((a, b), _) in bytes_started_counter.get_mut().unwrap().drain() {
3616 12 : let _ = REMOTE_TIMELINE_CLIENT_BYTES_STARTED_COUNTER.remove_label_values(&[
3617 12 : tenant_id,
3618 12 : shard_id,
3619 12 : timeline_id,
3620 12 : a,
3621 12 : b,
3622 12 : ]);
3623 12 : }
3624 40 : for ((a, b), _) in bytes_finished_counter.get_mut().unwrap().drain() {
3625 12 : let _ = REMOTE_TIMELINE_CLIENT_BYTES_FINISHED_COUNTER.remove_label_values(&[
3626 12 : tenant_id,
3627 12 : shard_id,
3628 12 : timeline_id,
3629 12 : a,
3630 12 : b,
3631 12 : ]);
3632 12 : }
3633 40 : let _ = remote_physical_size_gauge; // used to avoid an 'unused' warning in the destructuring above
3634 40 : let _ = remote_physical_size_gauge; // use to avoid 'unused' warning in desctructuring above
3635 40 : let _ = REMOTE_PHYSICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3636 40 : }
3637 40 : {
3638 40 : let _ = projected_remote_consistent_lsn_gauge;
3639 40 : let _ = PROJECTED_REMOTE_CONSISTENT_LSN.remove_label_values(&[
3640 40 : tenant_id,
3641 40 : shard_id,
3642 40 : timeline_id,
3643 40 : ]);
3644 40 : }
3645 40 : }
3646 : }
3647 :
3648 : /// Extension trait that wraps a future in [`MeasuredRemoteOp`], measuring the time spent
3649 : /// by a remote storage operation and recording the duration and success/failure as a Prometheus metric.
3650 : pub(crate) trait MeasureRemoteOp: Sized {
3651 6397 : fn measure_remote_op(
3652 6397 : self,
3653 6397 : file_kind: RemoteOpFileKind,
3654 6397 : op: RemoteOpKind,
3655 6397 : metrics: Arc<RemoteTimelineClientMetrics>,
3656 6397 : ) -> MeasuredRemoteOp<Self> {
3657 6397 : let start = Instant::now();
3658 6397 : MeasuredRemoteOp {
3659 6397 : inner: self,
3660 6397 : file_kind,
3661 6397 : op,
3662 6397 : start,
3663 6397 : metrics,
3664 6397 : }
3665 6397 : }
3666 : }
3667 :
3668 : impl<T: Sized> MeasureRemoteOp for T {}
3669 :
3670 : pin_project! {
3671 : pub(crate) struct MeasuredRemoteOp<F>
3672 : {
3673 : #[pin]
3674 : inner: F,
3675 : file_kind: RemoteOpFileKind,
3676 : op: RemoteOpKind,
3677 : start: Instant,
3678 : metrics: Arc<RemoteTimelineClientMetrics>,
3679 : }
3680 : }
3681 :
3682 : impl<F: Future<Output = Result<O, E>>, O, E> Future for MeasuredRemoteOp<F> {
3683 : type Output = Result<O, E>;
3684 :
3685 91072 : fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
3686 91072 : let this = self.project();
3687 91072 : let poll_result = this.inner.poll(cx);
3688 91072 : if let Poll::Ready(ref res) = poll_result {
3689 6112 : let duration = this.start.elapsed();
3690 6112 : let status = if res.is_ok() { &"success" } else { &"failure" };
3691 6112 : this.metrics
3692 6112 : .remote_operation_time(this.file_kind, this.op, status)
3693 6112 : .observe(duration.as_secs_f64());
3694 84960 : }
3695 91072 : poll_result
3696 91072 : }
3697 : }
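// A minimal sketch of wrapping a remote storage future with measure_remote_op.
// The future body is a placeholder, and the file/op kinds are taken as
// parameters so that no concrete enum variants are assumed here.
#[allow(dead_code)]
async fn example_measured_remote_call(
    file_kind: RemoteOpFileKind,
    op: RemoteOpKind,
    metrics: Arc<RemoteTimelineClientMetrics>,
) -> Result<(), std::io::Error> {
    async {
        // ... the actual remote storage request would run here ...
        Ok::<(), std::io::Error>(())
    }
    .measure_remote_op(file_kind, op, metrics)
    .await
    // on completion, the elapsed time is observed with a "success" or
    // "failure" status label, as done in the poll() implementation above
}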
3698 :
3699 : pub mod tokio_epoll_uring {
3700 : use std::collections::HashMap;
3701 : use std::sync::{Arc, Mutex};
3702 :
3703 : use metrics::{Histogram, LocalHistogram, UIntGauge, register_histogram, register_int_counter};
3704 : use once_cell::sync::Lazy;
3705 :
3706 : /// Shared storage for tokio-epoll-uring thread local metrics.
3707 : pub(crate) static THREAD_LOCAL_METRICS_STORAGE: Lazy<ThreadLocalMetricsStorage> =
3708 238 : Lazy::new(|| {
3709 238 : let slots_submission_queue_depth = register_histogram!(
3710 238 : "pageserver_tokio_epoll_uring_slots_submission_queue_depth",
3711 238 : "The slots waiters queue depth of each tokio_epoll_uring system",
3712 238 : vec![
3713 238 : 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0
3714 238 : ],
3715 238 : )
3716 238 : .expect("failed to define a metric");
3717 238 : ThreadLocalMetricsStorage {
3718 238 : observers: Mutex::new(HashMap::new()),
3719 238 : slots_submission_queue_depth,
3720 238 : }
3721 238 : });
3722 :
3723 : pub struct ThreadLocalMetricsStorage {
3724 : /// List of thread local metrics observers.
3725 : observers: Mutex<HashMap<u64, Arc<ThreadLocalMetrics>>>,
3726 : /// A histogram shared between all thread local systems
3727 : /// for collecting slots submission queue depth.
3728 : slots_submission_queue_depth: Histogram,
3729 : }
3730 :
3731 : /// Each thread-local [`tokio_epoll_uring::System`] gets one of these as its
3732 : /// [`tokio_epoll_uring::metrics::PerSystemMetrics`] generic.
3733 : ///
3734 : /// The System makes observations into [`Self`] and periodically, the collector
3735 : /// comes along and flushes [`Self`] into the shared storage [`THREAD_LOCAL_METRICS_STORAGE`].
3736 : ///
3737 : /// [`LocalHistogram`] is `!Send`, so we need to put it behind a [`Mutex`].
3738 : /// But except for the periodic flush, the lock is uncontended, so there's no waiting
3739 : /// for the cache coherence protocol to grant an exclusive cache line.
3740 : pub struct ThreadLocalMetrics {
3741 : /// Local observer of thread local tokio-epoll-uring system's slots waiters queue depth.
3742 : slots_submission_queue_depth: Mutex<LocalHistogram>,
3743 : }
3744 :
3745 : impl ThreadLocalMetricsStorage {
3746 : /// Registers a new thread local system. Returns a thread local metrics observer.
3747 1089 : pub fn register_system(&self, id: u64) -> Arc<ThreadLocalMetrics> {
3748 1089 : let per_system_metrics = Arc::new(ThreadLocalMetrics::new(
3749 1089 : self.slots_submission_queue_depth.local(),
3750 1089 : ));
3751 1089 : let mut g = self.observers.lock().unwrap();
3752 1089 : g.insert(id, Arc::clone(&per_system_metrics));
3753 1089 : per_system_metrics
3754 1089 : }
3755 :
3756 : /// Removes metrics observer for a thread local system.
3757 : /// This should be called before dropping a thread local system.
3758 238 : pub fn remove_system(&self, id: u64) {
3759 238 : let mut g = self.observers.lock().unwrap();
3760 238 : g.remove(&id);
3761 238 : }
3762 :
3763 : /// Flush all thread local metrics to the shared storage.
3764 0 : pub fn flush_thread_local_metrics(&self) {
3765 0 : let g = self.observers.lock().unwrap();
3766 0 : g.values().for_each(|local| {
3767 0 : local.flush();
3768 0 : });
3769 0 : }
3770 : }
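// A minimal sketch of the observer lifecycle described above, from the point
// of view of code that owns a thread-local tokio-epoll-uring system. The id
// value is an arbitrary placeholder for the system's identifier.
#[allow(dead_code)]
fn example_observer_lifecycle() {
    let system_id = 42u64;
    let observer = THREAD_LOCAL_METRICS_STORAGE.register_system(system_id);
    // ... the system records into `observer` while it is running; the metrics
    // collector periodically calls flush_thread_local_metrics() to move the
    // locally buffered samples into the shared histogram ...
    THREAD_LOCAL_METRICS_STORAGE.remove_system(system_id);
    drop(observer);
}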
3771 :
3772 : impl ThreadLocalMetrics {
3773 1089 : pub fn new(slots_submission_queue_depth: LocalHistogram) -> Self {
3774 1089 : ThreadLocalMetrics {
3775 1089 : slots_submission_queue_depth: Mutex::new(slots_submission_queue_depth),
3776 1089 : }
3777 1089 : }
3778 :
3779 : /// Flushes the thread local metrics to shared aggregator.
3780 0 : pub fn flush(&self) {
3781 0 : let Self {
3782 0 : slots_submission_queue_depth,
3783 0 : } = self;
3784 0 : slots_submission_queue_depth.lock().unwrap().flush();
3785 0 : }
3786 : }
3787 :
3788 : impl tokio_epoll_uring::metrics::PerSystemMetrics for ThreadLocalMetrics {
3789 1819124 : fn observe_slots_submission_queue_depth(&self, queue_depth: u64) {
3790 1819124 : let Self {
3791 1819124 : slots_submission_queue_depth,
3792 1819124 : } = self;
3793 1819124 : slots_submission_queue_depth
3794 1819124 : .lock()
3795 1819124 : .unwrap()
3796 1819124 : .observe(queue_depth as f64);
3797 1819124 : }
3798 : }
3799 :
3800 : pub struct Collector {
3801 : descs: Vec<metrics::core::Desc>,
3802 : systems_created: UIntGauge,
3803 : systems_destroyed: UIntGauge,
3804 : thread_local_metrics_storage: &'static ThreadLocalMetricsStorage,
3805 : }
3806 :
3807 : impl metrics::core::Collector for Collector {
3808 0 : fn desc(&self) -> Vec<&metrics::core::Desc> {
3809 0 : self.descs.iter().collect()
3810 0 : }
3811 :
3812 0 : fn collect(&self) -> Vec<metrics::proto::MetricFamily> {
3813 0 : let mut mfs = Vec::with_capacity(Self::NMETRICS);
3814 0 : let tokio_epoll_uring::metrics::GlobalMetrics {
3815 0 : systems_created,
3816 0 : systems_destroyed,
3817 0 : } = tokio_epoll_uring::metrics::global();
3818 0 : self.systems_created.set(systems_created);
3819 0 : mfs.extend(self.systems_created.collect());
3820 0 : self.systems_destroyed.set(systems_destroyed);
3821 0 : mfs.extend(self.systems_destroyed.collect());
3822 0 :
3823 0 : self.thread_local_metrics_storage
3824 0 : .flush_thread_local_metrics();
3825 0 :
3826 0 : mfs.extend(
3827 0 : self.thread_local_metrics_storage
3828 0 : .slots_submission_queue_depth
3829 0 : .collect(),
3830 0 : );
3831 0 : mfs
3832 0 : }
3833 : }
3834 :
3835 : impl Collector {
3836 : const NMETRICS: usize = 3;
3837 :
3838 : #[allow(clippy::new_without_default)]
3839 0 : pub fn new() -> Self {
3840 0 : let mut descs = Vec::new();
3841 0 :
3842 0 : let systems_created = UIntGauge::new(
3843 0 : "pageserver_tokio_epoll_uring_systems_created",
3844 0 : "counter of tokio-epoll-uring systems that were created",
3845 0 : )
3846 0 : .unwrap();
3847 0 : descs.extend(
3848 0 : metrics::core::Collector::desc(&systems_created)
3849 0 : .into_iter()
3850 0 : .cloned(),
3851 0 : );
3852 0 :
3853 0 : let systems_destroyed = UIntGauge::new(
3854 0 : "pageserver_tokio_epoll_uring_systems_destroyed",
3855 0 : "counter of tokio-epoll-uring systems that were destroyed",
3856 0 : )
3857 0 : .unwrap();
3858 0 : descs.extend(
3859 0 : metrics::core::Collector::desc(&systems_destroyed)
3860 0 : .into_iter()
3861 0 : .cloned(),
3862 0 : );
3863 0 :
3864 0 : Self {
3865 0 : descs,
3866 0 : systems_created,
3867 0 : systems_destroyed,
3868 0 : thread_local_metrics_storage: &THREAD_LOCAL_METRICS_STORAGE,
3869 0 : }
3870 0 : }
3871 : }
3872 :
3873 238 : pub(crate) static THREAD_LOCAL_LAUNCH_SUCCESSES: Lazy<metrics::IntCounter> = Lazy::new(|| {
3874 238 : register_int_counter!(
3875 238 : "pageserver_tokio_epoll_uring_pageserver_thread_local_launch_success_count",
3876 238 : "Number of times thread_local_system creation spanned multiple executor threads",
3877 238 : )
3878 238 : .unwrap()
3879 238 : });
3880 :
3881 0 : pub(crate) static THREAD_LOCAL_LAUNCH_FAILURES: Lazy<metrics::IntCounter> = Lazy::new(|| {
3882 0 : register_int_counter!(
3883 0 : "pageserver_tokio_epoll_uring_pageserver_thread_local_launch_failures_count",
3884 0 : "Number of times thread_local_system creation failed and was retried after back-off.",
3885 0 : )
3886 0 : .unwrap()
3887 0 : });
3888 : }
3889 :
3890 : pub(crate) struct GlobalAndPerTenantIntCounter {
3891 : global: IntCounter,
3892 : per_tenant: IntCounter,
3893 : }
3894 :
3895 : impl GlobalAndPerTenantIntCounter {
3896 : #[inline(always)]
3897 0 : pub(crate) fn inc(&self) {
3898 0 : self.inc_by(1)
3899 0 : }
3900 : #[inline(always)]
3901 450510 : pub(crate) fn inc_by(&self, n: u64) {
3902 450510 : self.global.inc_by(n);
3903 450510 : self.per_tenant.inc_by(n);
3904 450510 : }
3905 : }
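// A minimal usage sketch: a single inc_by drives both series, so the global
// counter always equals the sum of the per-tenant counters without a separate
// aggregation pass. The caller shown here is hypothetical.
#[allow(dead_code)]
fn example_account_wait_time(counter: &GlobalAndPerTenantIntCounter, waited_micros: u64) {
    counter.inc_by(waited_micros); // updates the global and per-tenant counters together
}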
3906 :
3907 : pub(crate) mod tenant_throttling {
3908 : use metrics::register_int_counter_vec;
3909 : use once_cell::sync::Lazy;
3910 : use utils::shard::TenantShardId;
3911 :
3912 : use super::GlobalAndPerTenantIntCounter;
3913 :
3914 : pub(crate) struct Metrics<const KIND: usize> {
3915 : pub(super) count_accounted_start: GlobalAndPerTenantIntCounter,
3916 : pub(super) count_accounted_finish: GlobalAndPerTenantIntCounter,
3917 : pub(super) wait_time: GlobalAndPerTenantIntCounter,
3918 : pub(super) count_throttled: GlobalAndPerTenantIntCounter,
3919 : }
3920 :
3921 416 : static COUNT_ACCOUNTED_START: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3922 416 : register_int_counter_vec!(
3923 416 : "pageserver_tenant_throttling_count_accounted_start_global",
3924 416 : "Count of tenant throttling starts, by kind of throttle.",
3925 416 : &["kind"]
3926 416 : )
3927 416 : .unwrap()
3928 416 : });
3929 416 : static COUNT_ACCOUNTED_START_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3930 416 : register_int_counter_vec!(
3931 416 : "pageserver_tenant_throttling_count_accounted_start",
3932 416 : "Count of tenant throttling starts, by kind of throttle.",
3933 416 : &["kind", "tenant_id", "shard_id"]
3934 416 : )
3935 416 : .unwrap()
3936 416 : });
3937 416 : static COUNT_ACCOUNTED_FINISH: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3938 416 : register_int_counter_vec!(
3939 416 : "pageserver_tenant_throttling_count_accounted_finish_global",
3940 416 : "Count of tenant throttling finishes, by kind of throttle.",
3941 416 : &["kind"]
3942 416 : )
3943 416 : .unwrap()
3944 416 : });
3945 416 : static COUNT_ACCOUNTED_FINISH_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3946 416 : register_int_counter_vec!(
3947 416 : "pageserver_tenant_throttling_count_accounted_finish",
3948 416 : "Count of tenant throttling finishes, by kind of throttle.",
3949 416 : &["kind", "tenant_id", "shard_id"]
3950 416 : )
3951 416 : .unwrap()
3952 416 : });
3953 416 : static WAIT_USECS: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3954 416 : register_int_counter_vec!(
3955 416 : "pageserver_tenant_throttling_wait_usecs_sum_global",
3956 416 : "Sum of microseconds spent waiting on the throttle, by kind of throttle.",
3957 416 : &["kind"]
3958 416 : )
3959 416 : .unwrap()
3960 416 : });
3961 416 : static WAIT_USECS_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3962 416 : register_int_counter_vec!(
3963 416 : "pageserver_tenant_throttling_wait_usecs_sum",
3964 416 : "Sum of microseconds spent waiting on the throttle, by kind of throttle.",
3965 416 : &["kind", "tenant_id", "shard_id"]
3966 416 : )
3967 416 : .unwrap()
3968 416 : });
3969 :
3970 416 : static WAIT_COUNT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3971 416 : register_int_counter_vec!(
3972 416 : "pageserver_tenant_throttling_count_global",
3973 416 : "Count of tenant throttlings, by kind of throttle.",
3974 416 : &["kind"]
3975 416 : )
3976 416 : .unwrap()
3977 416 : });
3978 416 : static WAIT_COUNT_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3979 416 : register_int_counter_vec!(
3980 416 : "pageserver_tenant_throttling_count",
3981 416 : "Count of tenant throttlings, by kind of throttle.",
3982 416 : &["kind", "tenant_id", "shard_id"]
3983 416 : )
3984 416 : .unwrap()
3985 416 : });
3986 :
3987 : const KINDS: &[&str] = &["pagestream"];
3988 : pub type Pagestream = Metrics<0>;
3989 :
3990 : impl<const KIND: usize> Metrics<KIND> {
3991 452 : pub(crate) fn new(tenant_shard_id: &TenantShardId) -> Self {
3992 452 : let per_tenant_label_values = &[
3993 452 : KINDS[KIND],
3994 452 : &tenant_shard_id.tenant_id.to_string(),
3995 452 : &tenant_shard_id.shard_slug().to_string(),
3996 452 : ];
3997 452 : Metrics {
3998 452 : count_accounted_start: {
3999 452 : GlobalAndPerTenantIntCounter {
4000 452 : global: COUNT_ACCOUNTED_START.with_label_values(&[KINDS[KIND]]),
4001 452 : per_tenant: COUNT_ACCOUNTED_START_PER_TENANT
4002 452 : .with_label_values(per_tenant_label_values),
4003 452 : }
4004 452 : },
4005 452 : count_accounted_finish: {
4006 452 : GlobalAndPerTenantIntCounter {
4007 452 : global: COUNT_ACCOUNTED_FINISH.with_label_values(&[KINDS[KIND]]),
4008 452 : per_tenant: COUNT_ACCOUNTED_FINISH_PER_TENANT
4009 452 : .with_label_values(per_tenant_label_values),
4010 452 : }
4011 452 : },
4012 452 : wait_time: {
4013 452 : GlobalAndPerTenantIntCounter {
4014 452 : global: WAIT_USECS.with_label_values(&[KINDS[KIND]]),
4015 452 : per_tenant: WAIT_USECS_PER_TENANT
4016 452 : .with_label_values(per_tenant_label_values),
4017 452 : }
4018 452 : },
4019 452 : count_throttled: {
4020 452 : GlobalAndPerTenantIntCounter {
4021 452 : global: WAIT_COUNT.with_label_values(&[KINDS[KIND]]),
4022 452 : per_tenant: WAIT_COUNT_PER_TENANT
4023 452 : .with_label_values(per_tenant_label_values),
4024 452 : }
4025 452 : },
4026 452 : }
4027 452 : }
4028 : }
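// A minimal sketch of how these metrics might be driven by a throttle
// implementation; the real call sites live in the throttling code, and the
// wait duration used here is an arbitrary placeholder.
#[allow(dead_code)]
fn example_record_throttling(tenant_shard_id: &TenantShardId) {
    let metrics = Pagestream::new(tenant_shard_id);
    metrics.count_accounted_start.inc(); // a request entered the throttle
    metrics.count_throttled.inc(); // it actually had to wait
    metrics.wait_time.inc_by(1_500); // for 1.5 ms
    metrics.count_accounted_finish.inc(); // and then proceeded
}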
4029 :
4030 0 : pub(crate) fn preinitialize_global_metrics() {
4031 0 : Lazy::force(&COUNT_ACCOUNTED_START);
4032 0 : Lazy::force(&COUNT_ACCOUNTED_FINISH);
4033 0 : Lazy::force(&WAIT_USECS);
4034 0 : Lazy::force(&WAIT_COUNT);
4035 0 : }
4036 :
4037 12 : pub(crate) fn remove_tenant_metrics(tenant_shard_id: &TenantShardId) {
4038 48 : for m in &[
4039 12 : &COUNT_ACCOUNTED_START_PER_TENANT,
4040 12 : &COUNT_ACCOUNTED_FINISH_PER_TENANT,
4041 12 : &WAIT_USECS_PER_TENANT,
4042 12 : &WAIT_COUNT_PER_TENANT,
4043 12 : ] {
4044 96 : for kind in KINDS {
4045 48 : let _ = m.remove_label_values(&[
4046 48 : kind,
4047 48 : &tenant_shard_id.tenant_id.to_string(),
4048 48 : &tenant_shard_id.shard_slug().to_string(),
4049 48 : ]);
4050 48 : }
4051 : }
4052 12 : }
4053 : }
4054 :
4055 : pub(crate) mod disk_usage_based_eviction {
4056 : use super::*;
4057 :
4058 : pub(crate) struct Metrics {
4059 : pub(crate) tenant_collection_time: Histogram,
4060 : pub(crate) tenant_layer_count: Histogram,
4061 : pub(crate) layers_collected: IntCounter,
4062 : pub(crate) layers_selected: IntCounter,
4063 : pub(crate) layers_evicted: IntCounter,
4064 : }
4065 :
4066 : impl Default for Metrics {
4067 0 : fn default() -> Self {
4068 0 : let tenant_collection_time = register_histogram!(
4069 0 : "pageserver_disk_usage_based_eviction_tenant_collection_seconds",
4070 0 : "Time spent collecting layers from a tenant -- not normalized by collected layer amount",
4071 0 : vec![0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1.0, 5.0, 10.0]
4072 0 : )
4073 0 : .unwrap();
4074 0 :
4075 0 : let tenant_layer_count = register_histogram!(
4076 0 : "pageserver_disk_usage_based_eviction_tenant_collected_layers",
4077 0 : "Amount of layers gathered from a tenant",
4078 0 : vec![5.0, 50.0, 500.0, 5000.0, 50000.0]
4079 0 : )
4080 0 : .unwrap();
4081 0 :
4082 0 : let layers_collected = register_int_counter!(
4083 0 : "pageserver_disk_usage_based_eviction_collected_layers_total",
4084 0 : "Amount of layers collected"
4085 0 : )
4086 0 : .unwrap();
4087 0 :
4088 0 : let layers_selected = register_int_counter!(
4089 0 : "pageserver_disk_usage_based_eviction_select_layers_total",
4090 0 : "Amount of layers selected"
4091 0 : )
4092 0 : .unwrap();
4093 0 :
4094 0 : let layers_evicted = register_int_counter!(
4095 0 : "pageserver_disk_usage_based_eviction_evicted_layers_total",
4096 0 : "Amount of layers successfully evicted"
4097 0 : )
4098 0 : .unwrap();
4099 0 :
4100 0 : Self {
4101 0 : tenant_collection_time,
4102 0 : tenant_layer_count,
4103 0 : layers_collected,
4104 0 : layers_selected,
4105 0 : layers_evicted,
4106 0 : }
4107 0 : }
4108 : }
4109 :
4110 : pub(crate) static METRICS: Lazy<Metrics> = Lazy::new(Metrics::default);
4111 : }
4112 :
4113 404 : static TOKIO_EXECUTOR_THREAD_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
4114 404 : register_uint_gauge_vec!(
4115 404 : "pageserver_tokio_executor_thread_configured_count",
4116 404 : "Total number of configured tokio executor threads in the process.
4117 404 : The `setup` label denotes whether we're running with multiple runtimes or a single runtime.",
4118 404 : &["setup"],
4119 404 : )
4120 404 : .unwrap()
4121 404 : });
4122 :
4123 404 : pub(crate) fn set_tokio_runtime_setup(setup: &str, num_threads: NonZeroUsize) {
4124 : static SERIALIZE: std::sync::Mutex<()> = std::sync::Mutex::new(());
4125 404 : let _guard = SERIALIZE.lock().unwrap();
4126 404 : TOKIO_EXECUTOR_THREAD_COUNT.reset();
4127 404 : TOKIO_EXECUTOR_THREAD_COUNT
4128 404 : .get_metric_with_label_values(&[setup])
4129 404 : .unwrap()
4130 404 : .set(u64::try_from(num_threads.get()).unwrap());
4131 404 : }
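// A minimal usage sketch: reporting the runtime configuration at startup.
// The label value and thread count here are arbitrary placeholders.
#[allow(dead_code)]
fn example_report_runtime_setup() {
    let threads = NonZeroUsize::new(8).unwrap();
    set_tokio_runtime_setup("example-setup", threads);
}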
4132 :
4133 0 : pub fn preinitialize_metrics(conf: &'static PageServerConf) {
4134 0 : set_page_service_config_max_batch_size(&conf.page_service_pipelining);
4135 0 :
4136 0 : // Python tests need these, and on some of them we do alerting.
4137 0 : //
4138 0 : // FIXME(4813): make it so that we have no top-level metrics, as this fn will easily fall out of
4139 0 : // order:
4140 0 : // - global metrics reside in a Lazy<PageserverMetrics>
4141 0 : // - access via crate::metrics::PS_METRICS.some_metric.inc()
4142 0 : // - could move the statics into TimelineMetrics::new()?
4143 0 :
4144 0 : // counters
4145 0 : [
4146 0 : &UNEXPECTED_ONDEMAND_DOWNLOADS,
4147 0 : &WALRECEIVER_STARTED_CONNECTIONS,
4148 0 : &WALRECEIVER_BROKER_UPDATES,
4149 0 : &WALRECEIVER_CANDIDATES_ADDED,
4150 0 : &WALRECEIVER_CANDIDATES_REMOVED,
4151 0 : &tokio_epoll_uring::THREAD_LOCAL_LAUNCH_FAILURES,
4152 0 : &tokio_epoll_uring::THREAD_LOCAL_LAUNCH_SUCCESSES,
4153 0 : &REMOTE_ONDEMAND_DOWNLOADED_LAYERS,
4154 0 : &REMOTE_ONDEMAND_DOWNLOADED_BYTES,
4155 0 : &CIRCUIT_BREAKERS_BROKEN,
4156 0 : &CIRCUIT_BREAKERS_UNBROKEN,
4157 0 : &PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS_GLOBAL,
4158 0 : &WAIT_LSN_IN_PROGRESS_GLOBAL_MICROS,
4159 0 : ]
4160 0 : .into_iter()
4161 0 : .for_each(|c| {
4162 0 : Lazy::force(c);
4163 0 : });
4164 0 :
4165 0 : // Deletion queue stats
4166 0 : Lazy::force(&DELETION_QUEUE);
4167 0 :
4168 0 : // Tenant stats
4169 0 : Lazy::force(&TENANT);
4170 0 :
4171 0 : // Tenant manager stats
4172 0 : Lazy::force(&TENANT_MANAGER);
4173 0 :
4174 0 : Lazy::force(&crate::tenant::storage_layer::layer::LAYER_IMPL_METRICS);
4175 0 : Lazy::force(&disk_usage_based_eviction::METRICS);
4176 :
4177 0 : for state_name in pageserver_api::models::TenantState::VARIANTS {
4178 0 : // Initialize the gauge for every tenant state, otherwise the time series might seemingly show
4179 0 : // values from the last restart.
4180 0 : TENANT_STATE_METRIC.with_label_values(&[state_name]).set(0);
4181 0 : }
4182 :
4183 : // countervecs
4184 0 : [
4185 0 : &BACKGROUND_LOOP_PERIOD_OVERRUN_COUNT,
4186 0 : &SMGR_QUERY_STARTED_GLOBAL,
4187 0 : ]
4188 0 : .into_iter()
4189 0 : .for_each(|c| {
4190 0 : Lazy::force(c);
4191 0 : });
4192 0 :
4193 0 : // gauges
4194 0 : WALRECEIVER_ACTIVE_MANAGERS.get();
4195 0 :
4196 0 : // histograms
4197 0 : [
4198 0 : &LAYERS_PER_READ_GLOBAL,
4199 0 : &LAYERS_PER_READ_BATCH_GLOBAL,
4200 0 : &LAYERS_PER_READ_AMORTIZED_GLOBAL,
4201 0 : &DELTAS_PER_READ_GLOBAL,
4202 0 : &WAIT_LSN_TIME,
4203 0 : &WAL_REDO_TIME,
4204 0 : &WAL_REDO_RECORDS_HISTOGRAM,
4205 0 : &WAL_REDO_BYTES_HISTOGRAM,
4206 0 : &WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM,
4207 0 : &PAGE_SERVICE_BATCH_SIZE_GLOBAL,
4208 0 : &PAGE_SERVICE_SMGR_BATCH_WAIT_TIME_GLOBAL,
4209 0 : ]
4210 0 : .into_iter()
4211 0 : .for_each(|h| {
4212 0 : Lazy::force(h);
4213 0 : });
4214 0 :
4215 0 : // Custom
4216 0 : Lazy::force(&BASEBACKUP_QUERY_TIME);
4217 0 : Lazy::force(&COMPUTE_COMMANDS_COUNTERS);
4218 0 : Lazy::force(&tokio_epoll_uring::THREAD_LOCAL_METRICS_STORAGE);
4219 0 :
4220 0 : tenant_throttling::preinitialize_global_metrics();
4221 0 : }