Line data Source code
1 : use std::collections::HashMap;
2 : use std::num::NonZeroUsize;
3 : use std::os::fd::RawFd;
4 : use std::pin::Pin;
5 : use std::sync::atomic::AtomicU64;
6 : use std::sync::{Arc, Mutex};
7 : use std::task::{Context, Poll};
8 : use std::time::{Duration, Instant};
9 :
10 : use enum_map::{Enum as _, EnumMap};
11 : use futures::Future;
12 : use metrics::{
13 : Counter, CounterVec, GaugeVec, Histogram, HistogramVec, IntCounter, IntCounterPair,
14 : IntCounterPairVec, IntCounterVec, IntGauge, IntGaugeVec, UIntGauge, UIntGaugeVec,
15 : register_counter_vec, register_gauge_vec, register_histogram, register_histogram_vec,
16 : register_int_counter, register_int_counter_pair_vec, register_int_counter_vec,
17 : register_int_gauge, register_int_gauge_vec, register_uint_gauge, register_uint_gauge_vec,
18 : };
19 : use once_cell::sync::Lazy;
20 : use pageserver_api::config::{
21 : PageServicePipeliningConfig, PageServicePipeliningConfigPipelined,
22 : PageServiceProtocolPipelinedExecutionStrategy,
23 : };
24 : use pageserver_api::models::InMemoryLayerInfo;
25 : use pageserver_api::shard::TenantShardId;
26 : use pin_project_lite::pin_project;
27 : use postgres_backend::{QueryError, is_expected_io_error};
28 : use pq_proto::framed::ConnectionError;
29 : use strum::{EnumCount, IntoEnumIterator as _, VariantNames};
30 : use strum_macros::{IntoStaticStr, VariantNames};
31 : use utils::id::TimelineId;
32 :
33 : use crate::config::PageServerConf;
34 : use crate::context::{PageContentKind, RequestContext};
35 : use crate::pgdatadir_mapping::DatadirModificationStats;
36 : use crate::task_mgr::TaskKind;
37 : use crate::tenant::Timeline;
38 : use crate::tenant::layer_map::LayerMap;
39 : use crate::tenant::mgr::TenantSlot;
40 : use crate::tenant::storage_layer::{InMemoryLayer, PersistentLayerDesc};
41 : use crate::tenant::tasks::BackgroundLoopKind;
42 : use crate::tenant::throttle::ThrottleResult;
43 :
44 : /// Prometheus histogram buckets (in seconds) for operations in the critical
45 : /// path. In other words, operations that directly affect that latency of user
46 : /// queries.
47 : ///
48 : /// The buckets capture the majority of latencies in the microsecond and
49 : /// millisecond range but also extend far enough up to distinguish "bad" from
50 : /// "really bad".
51 : const CRITICAL_OP_BUCKETS: &[f64] = &[
52 : 0.000_001, 0.000_010, 0.000_100, // 1 us, 10 us, 100 us
53 : 0.001_000, 0.010_000, 0.100_000, // 1 ms, 10 ms, 100 ms
54 : 1.0, 10.0, 100.0, // 1 s, 10 s, 100 s
55 : ];
56 :
57 : // Metrics collected on operations on the storage repository.
58 : #[derive(Debug, VariantNames, IntoStaticStr)]
59 : #[strum(serialize_all = "kebab_case")]
60 : pub(crate) enum StorageTimeOperation {
61 : #[strum(serialize = "layer flush")]
62 : LayerFlush,
63 :
64 : #[strum(serialize = "layer flush delay")]
65 : LayerFlushDelay,
66 :
67 : #[strum(serialize = "compact")]
68 : Compact,
69 :
70 : #[strum(serialize = "create images")]
71 : CreateImages,
72 :
73 : #[strum(serialize = "logical size")]
74 : LogicalSize,
75 :
76 : #[strum(serialize = "imitate logical size")]
77 : ImitateLogicalSize,
78 :
79 : #[strum(serialize = "load layer map")]
80 : LoadLayerMap,
81 :
82 : #[strum(serialize = "gc")]
83 : Gc,
84 :
85 : #[strum(serialize = "find gc cutoffs")]
86 : FindGcCutoffs,
87 : }
88 :
89 416 : pub(crate) static STORAGE_TIME_SUM_PER_TIMELINE: Lazy<CounterVec> = Lazy::new(|| {
90 416 : register_counter_vec!(
91 416 : "pageserver_storage_operations_seconds_sum",
92 416 : "Total time spent on storage operations with operation, tenant and timeline dimensions",
93 416 : &["operation", "tenant_id", "shard_id", "timeline_id"],
94 416 : )
95 416 : .expect("failed to define a metric")
96 416 : });
97 :
98 416 : pub(crate) static STORAGE_TIME_COUNT_PER_TIMELINE: Lazy<IntCounterVec> = Lazy::new(|| {
99 416 : register_int_counter_vec!(
100 416 : "pageserver_storage_operations_seconds_count",
101 416 : "Count of storage operations with operation, tenant and timeline dimensions",
102 416 : &["operation", "tenant_id", "shard_id", "timeline_id"],
103 416 : )
104 416 : .expect("failed to define a metric")
105 416 : });
106 :
107 : // Buckets for background operation duration in seconds, like compaction, GC, size calculation.
108 : const STORAGE_OP_BUCKETS: &[f64] = &[0.010, 0.100, 1.0, 10.0, 100.0, 1000.0];
109 :
110 416 : pub(crate) static STORAGE_TIME_GLOBAL: Lazy<HistogramVec> = Lazy::new(|| {
111 416 : register_histogram_vec!(
112 416 : "pageserver_storage_operations_seconds_global",
113 416 : "Time spent on storage operations",
114 416 : &["operation"],
115 416 : STORAGE_OP_BUCKETS.into(),
116 416 : )
117 416 : .expect("failed to define a metric")
118 416 : });
119 :
120 : /// Measures layers visited per read (i.e. read amplification).
121 : ///
122 : /// NB: for a batch, we count all visited layers towards each read. While the cost of layer visits
123 : /// are amortized across the batch, and some layers may not intersect with a given key, each visited
124 : /// layer contributes directly to the observed latency for every read in the batch, which is what we
125 : /// care about.
126 416 : pub(crate) static LAYERS_PER_READ: Lazy<HistogramVec> = Lazy::new(|| {
127 416 : register_histogram_vec!(
128 416 : "pageserver_layers_per_read",
129 416 : "Layers visited to serve a single read (read amplification). In a batch, all visited layers count towards every read.",
130 416 : &["tenant_id", "shard_id", "timeline_id"],
131 416 : // Low resolution to reduce cardinality.
132 416 : vec![4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0],
133 416 : )
134 416 : .expect("failed to define a metric")
135 416 : });
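// Worked example (assuming the per-read/per-batch counting described above): if a batch of
// 4 reads visits 12 layers in total, each of the 4 reads records an observation of 12 in
// `pageserver_layers_per_read` (and the global variant defined just below), the batch records
// a single observation of 12 in `pageserver_layers_per_read_batch_global`, and the amortized
// variant records 12 / 4 = 3.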
136 :
137 408 : pub(crate) static LAYERS_PER_READ_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
138 408 : register_histogram!(
139 408 : "pageserver_layers_per_read_global",
140 408 : "Layers visited to serve a single read (read amplification). In a batch, all visited layers count towards every read.",
141 408 : vec![1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0],
142 408 : )
143 408 : .expect("failed to define a metric")
144 408 : });
145 :
146 408 : pub(crate) static LAYERS_PER_READ_BATCH_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
147 408 : register_histogram!(
148 408 : "pageserver_layers_per_read_batch_global",
149 408 : "Layers visited to serve a single read batch (read amplification), regardless of number of reads.",
150 408 : vec![
151 408 : 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0
152 408 : ],
153 408 : )
154 408 : .expect("failed to define a metric")
155 408 : });
156 :
157 408 : pub(crate) static LAYERS_PER_READ_AMORTIZED_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
158 408 : register_histogram!(
159 408 : "pageserver_layers_per_read_amortized_global",
160 408 : "Layers visited to serve a single read (read amplification). Amortized across a batch: \
161 408 : all visited layers are divided by number of reads.",
162 408 : vec![
163 408 : 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0
164 408 : ],
165 408 : )
166 408 : .expect("failed to define a metric")
167 408 : });
168 :
169 408 : pub(crate) static DELTAS_PER_READ_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
170 408 : // We expect this to be low because of Postgres checkpoints. Let's see if that holds.
171 408 : register_histogram!(
172 408 : "pageserver_deltas_per_read_global",
173 408 : "Number of delta pages applied to image page per read",
174 408 : vec![0.0, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0],
175 408 : )
176 408 : .expect("failed to define a metric")
177 408 : });
178 :
179 0 : pub(crate) static CONCURRENT_INITDBS: Lazy<UIntGauge> = Lazy::new(|| {
180 0 : register_uint_gauge!(
181 0 : "pageserver_concurrent_initdb",
182 0 : "Number of initdb processes running"
183 0 : )
184 0 : .expect("failed to define a metric")
185 0 : });
186 :
187 0 : pub(crate) static INITDB_SEMAPHORE_ACQUISITION_TIME: Lazy<Histogram> = Lazy::new(|| {
188 0 : register_histogram!(
189 0 : "pageserver_initdb_semaphore_seconds_global",
190 0 : "Time spent getting a permit from the global initdb semaphore",
191 0 : STORAGE_OP_BUCKETS.into()
192 0 : )
193 0 : .expect("failed to define metric")
194 0 : });
195 :
196 0 : pub(crate) static INITDB_RUN_TIME: Lazy<Histogram> = Lazy::new(|| {
197 0 : register_histogram!(
198 0 : "pageserver_initdb_seconds_global",
199 0 : "Time spent performing initdb",
200 0 : STORAGE_OP_BUCKETS.into()
201 0 : )
202 0 : .expect("failed to define metric")
203 0 : });
204 :
205 : pub(crate) struct GetVectoredLatency {
206 : map: EnumMap<TaskKind, Option<Histogram>>,
207 : }
208 :
209 : #[allow(dead_code)]
210 : pub(crate) struct ScanLatency {
211 : map: EnumMap<TaskKind, Option<Histogram>>,
212 : }
213 :
214 : impl GetVectoredLatency {
215 : // Only these task types perform vectored gets. Filter all other tasks out to reduce total
216 : // cardinality of the metric.
217 : const TRACKED_TASK_KINDS: [TaskKind; 2] = [TaskKind::Compaction, TaskKind::PageRequestHandler];
218 :
219 39456 : pub(crate) fn for_task_kind(&self, task_kind: TaskKind) -> Option<&Histogram> {
220 39456 : self.map[task_kind].as_ref()
221 39456 : }
222 : }
223 :
224 : impl ScanLatency {
225 : // Only these task types perform vectored gets. Filter all other tasks out to reduce total
226 : // cardinality of the metric.
227 : const TRACKED_TASK_KINDS: [TaskKind; 1] = [TaskKind::PageRequestHandler];
228 :
229 24 : pub(crate) fn for_task_kind(&self, task_kind: TaskKind) -> Option<&Histogram> {
230 24 : self.map[task_kind].as_ref()
231 24 : }
232 : }
233 :
234 : pub(crate) struct ScanLatencyOngoingRecording<'a> {
235 : parent: &'a Histogram,
236 : start: std::time::Instant,
237 : }
238 :
239 : impl<'a> ScanLatencyOngoingRecording<'a> {
240 0 : pub(crate) fn start_recording(parent: &'a Histogram) -> ScanLatencyOngoingRecording<'a> {
241 0 : let start = Instant::now();
242 0 : ScanLatencyOngoingRecording { parent, start }
243 0 : }
244 :
245 0 : pub(crate) fn observe(self) {
246 0 : let elapsed = self.start.elapsed();
247 0 : self.parent.observe(elapsed.as_secs_f64());
248 0 : }
249 : }
250 :
251 400 : pub(crate) static GET_VECTORED_LATENCY: Lazy<GetVectoredLatency> = Lazy::new(|| {
252 400 : let inner = register_histogram_vec!(
253 400 : "pageserver_get_vectored_seconds",
254 400 : "Time spent in get_vectored.",
255 400 : &["task_kind"],
256 400 : CRITICAL_OP_BUCKETS.into(),
257 400 : )
258 400 : .expect("failed to define a metric");
259 400 :
260 400 : GetVectoredLatency {
261 12400 : map: EnumMap::from_array(std::array::from_fn(|task_kind_idx| {
262 12400 : let task_kind = TaskKind::from_usize(task_kind_idx);
263 12400 :
264 12400 : if GetVectoredLatency::TRACKED_TASK_KINDS.contains(&task_kind) {
265 800 : let task_kind = task_kind.into();
266 800 : Some(inner.with_label_values(&[task_kind]))
267 : } else {
268 11600 : None
269 : }
270 12400 : })),
271 400 : }
272 400 : });
273 :
274 8 : pub(crate) static SCAN_LATENCY: Lazy<ScanLatency> = Lazy::new(|| {
275 8 : let inner = register_histogram_vec!(
276 8 : "pageserver_scan_seconds",
277 8 : "Time spent in scan.",
278 8 : &["task_kind"],
279 8 : CRITICAL_OP_BUCKETS.into(),
280 8 : )
281 8 : .expect("failed to define a metric");
282 8 :
283 8 : ScanLatency {
284 248 : map: EnumMap::from_array(std::array::from_fn(|task_kind_idx| {
285 248 : let task_kind = TaskKind::from_usize(task_kind_idx);
286 248 :
287 248 : if ScanLatency::TRACKED_TASK_KINDS.contains(&task_kind) {
288 8 : let task_kind = task_kind.into();
289 8 : Some(inner.with_label_values(&[task_kind]))
290 : } else {
291 240 : None
292 : }
293 248 : })),
294 8 : }
295 8 : });
296 :
297 : pub(crate) struct PageCacheMetricsForTaskKind {
298 : pub read_accesses_immutable: IntCounter,
299 : pub read_hits_immutable: IntCounter,
300 : }
301 :
302 : pub(crate) struct PageCacheMetrics {
303 : map: EnumMap<TaskKind, EnumMap<PageContentKind, PageCacheMetricsForTaskKind>>,
304 : }
305 :
306 192 : static PAGE_CACHE_READ_HITS: Lazy<IntCounterVec> = Lazy::new(|| {
307 192 : register_int_counter_vec!(
308 192 : "pageserver_page_cache_read_hits_total",
309 192 : "Number of read accesses to the page cache that hit",
310 192 : &["task_kind", "key_kind", "content_kind", "hit_kind"]
311 192 : )
312 192 : .expect("failed to define a metric")
313 192 : });
314 :
315 192 : static PAGE_CACHE_READ_ACCESSES: Lazy<IntCounterVec> = Lazy::new(|| {
316 192 : register_int_counter_vec!(
317 192 : "pageserver_page_cache_read_accesses_total",
318 192 : "Number of read accesses to the page cache",
319 192 : &["task_kind", "key_kind", "content_kind"]
320 192 : )
321 192 : .expect("failed to define a metric")
322 192 : });
323 :
324 192 : pub(crate) static PAGE_CACHE: Lazy<PageCacheMetrics> = Lazy::new(|| PageCacheMetrics {
325 5952 : map: EnumMap::from_array(std::array::from_fn(|task_kind| {
326 5952 : let task_kind = TaskKind::from_usize(task_kind);
327 5952 : let task_kind: &'static str = task_kind.into();
328 47616 : EnumMap::from_array(std::array::from_fn(|content_kind| {
329 47616 : let content_kind = PageContentKind::from_usize(content_kind);
330 47616 : let content_kind: &'static str = content_kind.into();
331 47616 : PageCacheMetricsForTaskKind {
332 47616 : read_accesses_immutable: {
333 47616 : PAGE_CACHE_READ_ACCESSES
334 47616 : .get_metric_with_label_values(&[task_kind, "immutable", content_kind])
335 47616 : .unwrap()
336 47616 : },
337 47616 :
338 47616 : read_hits_immutable: {
339 47616 : PAGE_CACHE_READ_HITS
340 47616 : .get_metric_with_label_values(&[task_kind, "immutable", content_kind, "-"])
341 47616 : .unwrap()
342 47616 : },
343 47616 : }
344 47616 : }))
345 5952 : })),
346 192 : });
347 :
348 : impl PageCacheMetrics {
349 1950332 : pub(crate) fn for_ctx(&self, ctx: &RequestContext) -> &PageCacheMetricsForTaskKind {
350 1950332 : &self.map[ctx.task_kind()][ctx.page_content_kind()]
351 1950332 : }
352 : }
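// A minimal usage sketch (the call site and the `cache_hit` flag below are illustrative,
// not the actual page cache code path):
// ```
// let metrics = PAGE_CACHE.for_ctx(ctx);
// metrics.read_accesses_immutable.inc();
// if cache_hit {
//     metrics.read_hits_immutable.inc();
// }
// ```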
353 :
354 : pub(crate) struct PageCacheSizeMetrics {
355 : pub max_bytes: UIntGauge,
356 :
357 : pub current_bytes_immutable: UIntGauge,
358 : }
359 :
360 192 : static PAGE_CACHE_SIZE_CURRENT_BYTES: Lazy<UIntGaugeVec> = Lazy::new(|| {
361 192 : register_uint_gauge_vec!(
362 192 : "pageserver_page_cache_size_current_bytes",
363 192 : "Current size of the page cache in bytes, by key kind",
364 192 : &["key_kind"]
365 192 : )
366 192 : .expect("failed to define a metric")
367 192 : });
368 :
369 : pub(crate) static PAGE_CACHE_SIZE: Lazy<PageCacheSizeMetrics> =
370 192 : Lazy::new(|| PageCacheSizeMetrics {
371 192 : max_bytes: {
372 192 : register_uint_gauge!(
373 192 : "pageserver_page_cache_size_max_bytes",
374 192 : "Maximum size of the page cache in bytes"
375 192 : )
376 192 : .expect("failed to define a metric")
377 192 : },
378 192 : current_bytes_immutable: {
379 192 : PAGE_CACHE_SIZE_CURRENT_BYTES
380 192 : .get_metric_with_label_values(&["immutable"])
381 192 : .unwrap()
382 192 : },
383 192 : });
384 :
385 : pub(crate) mod page_cache_eviction_metrics {
386 : use std::num::NonZeroUsize;
387 :
388 : use metrics::{IntCounter, IntCounterVec, register_int_counter_vec};
389 : use once_cell::sync::Lazy;
390 :
391 : #[derive(Clone, Copy)]
392 : pub(crate) enum Outcome {
393 : FoundSlotUnused { iters: NonZeroUsize },
394 : FoundSlotEvicted { iters: NonZeroUsize },
395 : ItersExceeded { iters: NonZeroUsize },
396 : }
397 :
398 192 : static ITERS_TOTAL_VEC: Lazy<IntCounterVec> = Lazy::new(|| {
399 192 : register_int_counter_vec!(
400 192 : "pageserver_page_cache_find_victim_iters_total",
401 192 : "Counter for the number of iterations in the find_victim loop",
402 192 : &["outcome"],
403 192 : )
404 192 : .expect("failed to define a metric")
405 192 : });
406 :
407 192 : static CALLS_VEC: Lazy<IntCounterVec> = Lazy::new(|| {
408 192 : register_int_counter_vec!(
409 192 : "pageserver_page_cache_find_victim_calls",
410 192 : "Incremented at the end of each find_victim() call.\
411 192 : Filter by outcome to get e.g., eviction rate.",
412 192 : &["outcome"]
413 192 : )
414 192 : .unwrap()
415 192 : });
416 :
417 63388 : pub(crate) fn observe(outcome: Outcome) {
418 : macro_rules! dry {
419 : ($label:literal, $iters:expr) => {{
420 : static LABEL: &'static str = $label;
421 : static ITERS_TOTAL: Lazy<IntCounter> =
422 232 : Lazy::new(|| ITERS_TOTAL_VEC.with_label_values(&[LABEL]));
423 : static CALLS: Lazy<IntCounter> =
424 232 : Lazy::new(|| CALLS_VEC.with_label_values(&[LABEL]));
425 : ITERS_TOTAL.inc_by(($iters.get()) as u64);
426 : CALLS.inc();
427 : }};
428 : }
429 63388 : match outcome {
430 3296 : Outcome::FoundSlotUnused { iters } => dry!("found_empty", iters),
431 60092 : Outcome::FoundSlotEvicted { iters } => {
432 60092 : dry!("found_evicted", iters)
433 : }
434 0 : Outcome::ItersExceeded { iters } => {
435 0 : dry!("err_iters_exceeded", iters);
436 0 : super::page_cache_errors_inc(super::PageCacheErrorKind::EvictIterLimit);
437 0 : }
438 : }
439 63388 : }
440 : }
441 :
442 0 : static PAGE_CACHE_ERRORS: Lazy<IntCounterVec> = Lazy::new(|| {
443 0 : register_int_counter_vec!(
444 0 : "page_cache_errors_total",
445 0 : "Number of timeouts while acquiring a pinned slot in the page cache",
446 0 : &["error_kind"]
447 0 : )
448 0 : .expect("failed to define a metric")
449 0 : });
450 :
451 : #[derive(IntoStaticStr)]
452 : #[strum(serialize_all = "kebab_case")]
453 : pub(crate) enum PageCacheErrorKind {
454 : AcquirePinnedSlotTimeout,
455 : EvictIterLimit,
456 : }
457 :
458 0 : pub(crate) fn page_cache_errors_inc(error_kind: PageCacheErrorKind) {
459 0 : PAGE_CACHE_ERRORS
460 0 : .get_metric_with_label_values(&[error_kind.into()])
461 0 : .unwrap()
462 0 : .inc();
463 0 : }
464 :
465 44 : pub(crate) static WAIT_LSN_TIME: Lazy<Histogram> = Lazy::new(|| {
466 44 : register_histogram!(
467 44 : "pageserver_wait_lsn_seconds",
468 44 : "Time spent waiting for WAL to arrive. Updated on completion of the wait_lsn operation.",
469 44 : CRITICAL_OP_BUCKETS.into(),
470 44 : )
471 44 : .expect("failed to define a metric")
472 44 : });
473 :
474 416 : pub(crate) static WAIT_LSN_START_FINISH_COUNTERPAIR: Lazy<IntCounterPairVec> = Lazy::new(|| {
475 416 : register_int_counter_pair_vec!(
476 416 : "pageserver_wait_lsn_started_count",
477 416 : "Number of wait_lsn operations started.",
478 416 : "pageserver_wait_lsn_finished_count",
479 416 : "Number of wait_lsn operations finished.",
480 416 : &["tenant_id", "shard_id", "timeline_id"],
481 416 : )
482 416 : .expect("failed to define a metric")
483 416 : });
484 :
485 416 : pub(crate) static WAIT_LSN_IN_PROGRESS_MICROS: Lazy<IntCounterVec> = Lazy::new(|| {
486 416 : register_int_counter_vec!(
487 416 : "pageserver_wait_lsn_in_progress_micros",
488 416 : "Time spent waiting for WAL to arrive, by timeline_id. Updated periodically while waiting.",
489 416 : &["tenant_id", "shard_id", "timeline_id"],
490 416 : )
491 416 : .expect("failed to define a metric")
492 416 : });
493 :
494 416 : pub(crate) static WAIT_LSN_IN_PROGRESS_GLOBAL_MICROS: Lazy<IntCounter> = Lazy::new(|| {
495 416 : register_int_counter!(
496 416 : "pageserver_wait_lsn_in_progress_micros_global",
497 416 : "Time spent waiting for WAL to arrive, globally. Updated periodically while waiting."
498 416 : )
499 416 : .expect("failed to define a metric")
500 416 : });
501 :
502 416 : static LAST_RECORD_LSN: Lazy<IntGaugeVec> = Lazy::new(|| {
503 416 : register_int_gauge_vec!(
504 416 : "pageserver_last_record_lsn",
505 416 : "Last record LSN grouped by timeline",
506 416 : &["tenant_id", "shard_id", "timeline_id"]
507 416 : )
508 416 : .expect("failed to define a metric")
509 416 : });
510 :
511 416 : static DISK_CONSISTENT_LSN: Lazy<IntGaugeVec> = Lazy::new(|| {
512 416 : register_int_gauge_vec!(
513 416 : "pageserver_disk_consistent_lsn",
514 416 : "Disk consistent LSN grouped by timeline",
515 416 : &["tenant_id", "shard_id", "timeline_id"]
516 416 : )
517 416 : .expect("failed to define a metric")
518 416 : });
519 :
520 416 : pub(crate) static PROJECTED_REMOTE_CONSISTENT_LSN: Lazy<UIntGaugeVec> = Lazy::new(|| {
521 416 : register_uint_gauge_vec!(
522 416 : "pageserver_projected_remote_consistent_lsn",
523 416 : "Projected remote consistent LSN grouped by timeline",
524 416 : &["tenant_id", "shard_id", "timeline_id"]
525 416 : )
526 416 : .expect("failed to define a metric")
527 416 : });
528 :
529 416 : static PITR_HISTORY_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
530 416 : register_uint_gauge_vec!(
531 416 : "pageserver_pitr_history_size",
532 416 : "Data written since PITR cutoff on this timeline",
533 416 : &["tenant_id", "shard_id", "timeline_id"]
534 416 : )
535 416 : .expect("failed to define a metric")
536 416 : });
537 :
538 : #[derive(
539 240 : strum_macros::EnumIter,
540 0 : strum_macros::EnumString,
541 : strum_macros::Display,
542 : strum_macros::IntoStaticStr,
543 : )]
544 : #[strum(serialize_all = "kebab_case")]
545 : pub(crate) enum LayerKind {
546 : Delta,
547 : Image,
548 : }
549 :
550 : #[derive(
551 100 : strum_macros::EnumIter,
552 0 : strum_macros::EnumString,
553 : strum_macros::Display,
554 : strum_macros::IntoStaticStr,
555 : )]
556 : #[strum(serialize_all = "kebab_case")]
557 : pub(crate) enum LayerLevel {
558 : // We don't track the currently open ephemeral layer, since there's always exactly 1 and its
559 : // size changes. See `TIMELINE_EPHEMERAL_BYTES`.
560 : Frozen,
561 : L0,
562 : L1,
563 : }
564 :
565 408 : static TIMELINE_LAYER_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
566 408 : register_uint_gauge_vec!(
567 408 : "pageserver_layer_bytes",
568 408 : "Sum of frozen, L0, and L1 layer physical sizes in bytes (excluding the open ephemeral layer)",
569 408 : &["tenant_id", "shard_id", "timeline_id", "level", "kind"]
570 408 : )
571 408 : .expect("failed to define a metric")
572 408 : });
573 :
574 408 : static TIMELINE_LAYER_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
575 408 : register_uint_gauge_vec!(
576 408 : "pageserver_layer_count",
577 408 : "Number of frozen, L0, and L1 layers (excluding the open ephemeral layer)",
578 408 : &["tenant_id", "shard_id", "timeline_id", "level", "kind"]
579 408 : )
580 408 : .expect("failed to define a metric")
581 408 : });
582 :
583 416 : static TIMELINE_ARCHIVE_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
584 416 : register_uint_gauge_vec!(
585 416 : "pageserver_archive_size",
586 416 : "Timeline's logical size if it is considered eligible for archival (outside PITR window), else zero",
587 416 : &["tenant_id", "shard_id", "timeline_id"]
588 416 : )
589 416 : .expect("failed to define a metric")
590 416 : });
591 :
592 416 : static STANDBY_HORIZON: Lazy<IntGaugeVec> = Lazy::new(|| {
593 416 : register_int_gauge_vec!(
594 416 : "pageserver_standby_horizon",
595 416 : "Standby apply LSN for which GC is hold off, by timeline.",
596 416 : &["tenant_id", "shard_id", "timeline_id"]
597 416 : )
598 416 : .expect("failed to define a metric")
599 416 : });
600 :
601 416 : static RESIDENT_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
602 416 : register_uint_gauge_vec!(
603 416 : "pageserver_resident_physical_size",
604 416 : "The size of the layer files present in the pageserver's filesystem, for attached locations.",
605 416 : &["tenant_id", "shard_id", "timeline_id"]
606 416 : )
607 416 : .expect("failed to define a metric")
608 416 : });
609 :
610 416 : static VISIBLE_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
611 416 : register_uint_gauge_vec!(
612 416 : "pageserver_visible_physical_size",
613 416 : "The size of the layer files present in the pageserver's filesystem.",
614 416 : &["tenant_id", "shard_id", "timeline_id"]
615 416 : )
616 416 : .expect("failed to define a metric")
617 416 : });
618 :
619 408 : pub(crate) static RESIDENT_PHYSICAL_SIZE_GLOBAL: Lazy<UIntGauge> = Lazy::new(|| {
620 408 : register_uint_gauge!(
621 408 : "pageserver_resident_physical_size_global",
622 408 : "Like `pageserver_resident_physical_size`, but without tenant/timeline dimensions."
623 408 : )
624 408 : .expect("failed to define a metric")
625 408 : });
626 :
627 416 : static REMOTE_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
628 416 : register_uint_gauge_vec!(
629 416 : "pageserver_remote_physical_size",
630 416 : "The size of the layer files present in the remote storage that are listed in the remote index_part.json.",
631 416 : // Corollary: If any files are missing from the index part, they won't be included here.
632 416 : &["tenant_id", "shard_id", "timeline_id"]
633 416 : )
634 416 : .expect("failed to define a metric")
635 416 : });
636 :
637 416 : static REMOTE_PHYSICAL_SIZE_GLOBAL: Lazy<UIntGauge> = Lazy::new(|| {
638 416 : register_uint_gauge!(
639 416 : "pageserver_remote_physical_size_global",
640 416 : "Like `pageserver_remote_physical_size`, but without tenant/timeline dimensions."
641 416 : )
642 416 : .expect("failed to define a metric")
643 416 : });
644 :
645 12 : pub(crate) static REMOTE_ONDEMAND_DOWNLOADED_LAYERS: Lazy<IntCounter> = Lazy::new(|| {
646 12 : register_int_counter!(
647 12 : "pageserver_remote_ondemand_downloaded_layers_total",
648 12 : "Total on-demand downloaded layers"
649 12 : )
650 12 : .unwrap()
651 12 : });
652 :
653 12 : pub(crate) static REMOTE_ONDEMAND_DOWNLOADED_BYTES: Lazy<IntCounter> = Lazy::new(|| {
654 12 : register_int_counter!(
655 12 : "pageserver_remote_ondemand_downloaded_bytes_total",
656 12 : "Total bytes of layers on-demand downloaded",
657 12 : )
658 12 : .unwrap()
659 12 : });
660 :
661 416 : static CURRENT_LOGICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
662 416 : register_uint_gauge_vec!(
663 416 : "pageserver_current_logical_size",
664 416 : "Current logical size grouped by timeline",
665 416 : &["tenant_id", "shard_id", "timeline_id"]
666 416 : )
667 416 : .expect("failed to define current logical size metric")
668 416 : });
669 :
670 416 : static AUX_FILE_SIZE: Lazy<IntGaugeVec> = Lazy::new(|| {
671 416 : register_int_gauge_vec!(
672 416 : "pageserver_aux_file_estimated_size",
673 416 : "The size of all aux files for a timeline in aux file v2 store.",
674 416 : &["tenant_id", "shard_id", "timeline_id"]
675 416 : )
676 416 : .expect("failed to define a metric")
677 416 : });
678 :
679 416 : static VALID_LSN_LEASE_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
680 416 : register_uint_gauge_vec!(
681 416 : "pageserver_valid_lsn_lease_count",
682 416 : "The number of valid leases after refreshing gc info.",
683 416 : &["tenant_id", "shard_id", "timeline_id"],
684 416 : )
685 416 : .expect("failed to define a metric")
686 416 : });
687 :
688 0 : pub(crate) static CIRCUIT_BREAKERS_BROKEN: Lazy<IntCounter> = Lazy::new(|| {
689 0 : register_int_counter!(
690 0 : "pageserver_circuit_breaker_broken",
691 0 : "How many times a circuit breaker has broken"
692 0 : )
693 0 : .expect("failed to define a metric")
694 0 : });
695 :
696 0 : pub(crate) static CIRCUIT_BREAKERS_UNBROKEN: Lazy<IntCounter> = Lazy::new(|| {
697 0 : register_int_counter!(
698 0 : "pageserver_circuit_breaker_unbroken",
699 0 : "How many times a circuit breaker has been un-broken (recovered)"
700 0 : )
701 0 : .expect("failed to define a metric")
702 0 : });
703 :
704 400 : pub(crate) static COMPRESSION_IMAGE_INPUT_BYTES: Lazy<IntCounter> = Lazy::new(|| {
705 400 : register_int_counter!(
706 400 : "pageserver_compression_image_in_bytes_total",
707 400 : "Size of data written into image layers before compression"
708 400 : )
709 400 : .expect("failed to define a metric")
710 400 : });
711 :
712 400 : pub(crate) static COMPRESSION_IMAGE_INPUT_BYTES_CONSIDERED: Lazy<IntCounter> = Lazy::new(|| {
713 400 : register_int_counter!(
714 400 : "pageserver_compression_image_in_bytes_considered",
715 400 : "Size of potentially compressible data written into image layers before compression"
716 400 : )
717 400 : .expect("failed to define a metric")
718 400 : });
719 :
720 400 : pub(crate) static COMPRESSION_IMAGE_INPUT_BYTES_CHOSEN: Lazy<IntCounter> = Lazy::new(|| {
721 400 : register_int_counter!(
722 400 : "pageserver_compression_image_in_bytes_chosen",
723 400 : "Size of data whose compressed form was written into image layers"
724 400 : )
725 400 : .expect("failed to define a metric")
726 400 : });
727 :
728 400 : pub(crate) static COMPRESSION_IMAGE_OUTPUT_BYTES: Lazy<IntCounter> = Lazy::new(|| {
729 400 : register_int_counter!(
730 400 : "pageserver_compression_image_out_bytes_total",
731 400 : "Size of compressed image layer written"
732 400 : )
733 400 : .expect("failed to define a metric")
734 400 : });
735 :
736 20 : pub(crate) static RELSIZE_CACHE_ENTRIES: Lazy<UIntGauge> = Lazy::new(|| {
737 20 : register_uint_gauge!(
738 20 : "pageserver_relsize_cache_entries",
739 20 : "Number of entries in the relation size cache",
740 20 : )
741 20 : .expect("failed to define a metric")
742 20 : });
743 :
744 20 : pub(crate) static RELSIZE_CACHE_HITS: Lazy<IntCounter> = Lazy::new(|| {
745 20 : register_int_counter!("pageserver_relsize_cache_hits", "Relation size cache hits",)
746 20 : .expect("failed to define a metric")
747 20 : });
748 :
749 20 : pub(crate) static RELSIZE_CACHE_MISSES: Lazy<IntCounter> = Lazy::new(|| {
750 20 : register_int_counter!(
751 20 : "pageserver_relsize_cache_misses",
752 20 : "Relation size cache misses",
753 20 : )
754 20 : .expect("failed to define a metric")
755 20 : });
756 :
757 8 : pub(crate) static RELSIZE_CACHE_MISSES_OLD: Lazy<IntCounter> = Lazy::new(|| {
758 8 : register_int_counter!(
759 8 : "pageserver_relsize_cache_misses_old",
760 8 : "Relation size cache misses where the lookup LSN is older than the last relation update"
761 8 : )
762 8 : .expect("failed to define a metric")
763 8 : });
764 :
765 : pub(crate) mod initial_logical_size {
766 : use metrics::{IntCounter, IntCounterVec, register_int_counter, register_int_counter_vec};
767 : use once_cell::sync::Lazy;
768 :
769 : pub(crate) struct StartCalculation(IntCounterVec);
770 416 : pub(crate) static START_CALCULATION: Lazy<StartCalculation> = Lazy::new(|| {
771 416 : StartCalculation(
772 416 : register_int_counter_vec!(
773 416 : "pageserver_initial_logical_size_start_calculation",
774 416 : "Incremented each time we start an initial logical size calculation attempt. \
775 416 : The `circumstances` label provides some additional details.",
776 416 : &["attempt", "circumstances"]
777 416 : )
778 416 : .unwrap(),
779 416 : )
780 416 : });
781 :
782 : struct DropCalculation {
783 : first: IntCounter,
784 : retry: IntCounter,
785 : }
786 :
787 416 : static DROP_CALCULATION: Lazy<DropCalculation> = Lazy::new(|| {
788 416 : let vec = register_int_counter_vec!(
789 416 : "pageserver_initial_logical_size_drop_calculation",
790 416 : "Incremented each time we abort a started size calculation attmpt.",
791 416 : &["attempt"]
792 416 : )
793 416 : .unwrap();
794 416 : DropCalculation {
795 416 : first: vec.with_label_values(&["first"]),
796 416 : retry: vec.with_label_values(&["retry"]),
797 416 : }
798 416 : });
799 :
800 : pub(crate) struct Calculated {
801 : pub(crate) births: IntCounter,
802 : pub(crate) deaths: IntCounter,
803 : }
804 :
805 416 : pub(crate) static CALCULATED: Lazy<Calculated> = Lazy::new(|| Calculated {
806 416 : births: register_int_counter!(
807 416 : "pageserver_initial_logical_size_finish_calculation",
808 416 : "Incremented every time we finish calculation of initial logical size.\
809 416 : If everything is working well, this should happen at most once per Timeline object."
810 416 : )
811 416 : .unwrap(),
812 416 : deaths: register_int_counter!(
813 416 : "pageserver_initial_logical_size_drop_finished_calculation",
814 416 : "Incremented when we drop a finished initial logical size calculation result.\
815 416 : Mainly useful to turn pageserver_initial_logical_size_finish_calculation into a gauge."
816 416 : )
817 416 : .unwrap(),
818 416 : });
819 :
820 : pub(crate) struct OngoingCalculationGuard {
821 : inc_drop_calculation: Option<IntCounter>,
822 : }
823 :
824 : #[derive(strum_macros::IntoStaticStr)]
825 : pub(crate) enum StartCircumstances {
826 : EmptyInitial,
827 : SkippedConcurrencyLimiter,
828 : AfterBackgroundTasksRateLimit,
829 : }
830 :
831 : impl StartCalculation {
832 440 : pub(crate) fn first(&self, circumstances: StartCircumstances) -> OngoingCalculationGuard {
833 440 : let circumstances_label: &'static str = circumstances.into();
834 440 : self.0
835 440 : .with_label_values(&["first", circumstances_label])
836 440 : .inc();
837 440 : OngoingCalculationGuard {
838 440 : inc_drop_calculation: Some(DROP_CALCULATION.first.clone()),
839 440 : }
840 440 : }
841 0 : pub(crate) fn retry(&self, circumstances: StartCircumstances) -> OngoingCalculationGuard {
842 0 : let circumstances_label: &'static str = circumstances.into();
843 0 : self.0
844 0 : .with_label_values(&["retry", circumstances_label])
845 0 : .inc();
846 0 : OngoingCalculationGuard {
847 0 : inc_drop_calculation: Some(DROP_CALCULATION.retry.clone()),
848 0 : }
849 0 : }
850 : }
851 :
852 : impl Drop for OngoingCalculationGuard {
853 440 : fn drop(&mut self) {
854 440 : if let Some(counter) = self.inc_drop_calculation.take() {
855 0 : counter.inc();
856 440 : }
857 440 : }
858 : }
859 :
860 : impl OngoingCalculationGuard {
861 440 : pub(crate) fn calculation_result_saved(mut self) -> FinishedCalculationGuard {
862 440 : drop(self.inc_drop_calculation.take());
863 440 : CALCULATED.births.inc();
864 440 : FinishedCalculationGuard {
865 440 : inc_on_drop: CALCULATED.deaths.clone(),
866 440 : }
867 440 : }
868 : }
869 :
870 : pub(crate) struct FinishedCalculationGuard {
871 : inc_on_drop: IntCounter,
872 : }
873 :
874 : impl Drop for FinishedCalculationGuard {
875 12 : fn drop(&mut self) {
876 12 : self.inc_on_drop.inc();
877 12 : }
878 : }
879 :
880 : // context: https://github.com/neondatabase/neon/issues/5963
881 : pub(crate) static TIMELINES_WHERE_WALRECEIVER_GOT_APPROXIMATE_SIZE: Lazy<IntCounter> =
882 0 : Lazy::new(|| {
883 0 : register_int_counter!(
884 0 : "pageserver_initial_logical_size_timelines_where_walreceiver_got_approximate_size",
885 0 : "Counter for the following event: walreceiver calls\
886 0 : Timeline::get_current_logical_size() and it returns `Approximate` for the first time."
887 0 : )
888 0 : .unwrap()
889 0 : });
890 : }
891 :
892 0 : static DIRECTORY_ENTRIES_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
893 0 : register_uint_gauge_vec!(
894 0 : "pageserver_directory_entries_count",
895 0 : "Sum of the entries in pageserver-stored directory listings",
896 0 : &["tenant_id", "shard_id", "timeline_id"]
897 0 : )
898 0 : .expect("failed to define a metric")
899 0 : });
900 :
901 420 : pub(crate) static TENANT_STATE_METRIC: Lazy<UIntGaugeVec> = Lazy::new(|| {
902 420 : register_uint_gauge_vec!(
903 420 : "pageserver_tenant_states_count",
904 420 : "Count of tenants per state",
905 420 : &["state"]
906 420 : )
907 420 : .expect("Failed to register pageserver_tenant_states_count metric")
908 420 : });
909 :
910 : /// A set of broken tenants.
911 : ///
912 : /// These are expected to be so rare that a set is fine. Set as in a new timeseries per each broken
913 : /// tenant.
914 20 : pub(crate) static BROKEN_TENANTS_SET: Lazy<UIntGaugeVec> = Lazy::new(|| {
915 20 : register_uint_gauge_vec!(
916 20 : "pageserver_broken_tenants_count",
917 20 : "Set of broken tenants",
918 20 : &["tenant_id", "shard_id"]
919 20 : )
920 20 : .expect("Failed to register pageserver_tenant_states_count metric")
921 20 : });
922 :
923 12 : pub(crate) static TENANT_SYNTHETIC_SIZE_METRIC: Lazy<UIntGaugeVec> = Lazy::new(|| {
924 12 : register_uint_gauge_vec!(
925 12 : "pageserver_tenant_synthetic_cached_size_bytes",
926 12 : "Synthetic size of each tenant in bytes",
927 12 : &["tenant_id"]
928 12 : )
929 12 : .expect("Failed to register pageserver_tenant_synthetic_cached_size_bytes metric")
930 12 : });
931 :
932 0 : pub(crate) static EVICTION_ITERATION_DURATION: Lazy<HistogramVec> = Lazy::new(|| {
933 0 : register_histogram_vec!(
934 0 : "pageserver_eviction_iteration_duration_seconds_global",
935 0 : "Time spent on a single eviction iteration",
936 0 : &["period_secs", "threshold_secs"],
937 0 : STORAGE_OP_BUCKETS.into(),
938 0 : )
939 0 : .expect("failed to define a metric")
940 0 : });
941 :
942 416 : static EVICTIONS: Lazy<IntCounterVec> = Lazy::new(|| {
943 416 : register_int_counter_vec!(
944 416 : "pageserver_evictions",
945 416 : "Number of layers evicted from the pageserver",
946 416 : &["tenant_id", "shard_id", "timeline_id"]
947 416 : )
948 416 : .expect("failed to define a metric")
949 416 : });
950 :
951 416 : static EVICTIONS_WITH_LOW_RESIDENCE_DURATION: Lazy<IntCounterVec> = Lazy::new(|| {
952 416 : register_int_counter_vec!(
953 416 : "pageserver_evictions_with_low_residence_duration",
954 416 : "If a layer is evicted that was resident for less than `low_threshold`, it is counted to this counter. \
955 416 : Residence duration is determined using the `residence_duration_data_source`.",
956 416 : &["tenant_id", "shard_id", "timeline_id", "residence_duration_data_source", "low_threshold_secs"]
957 416 : )
958 416 : .expect("failed to define a metric")
959 416 : });
960 :
961 0 : pub(crate) static UNEXPECTED_ONDEMAND_DOWNLOADS: Lazy<IntCounter> = Lazy::new(|| {
962 0 : register_int_counter!(
963 0 : "pageserver_unexpected_ondemand_downloads_count",
964 0 : "Number of unexpected on-demand downloads. \
965 0 : We log more context for each increment, so we forgo any labels in this metric.",
966 0 : )
967 0 : .expect("failed to define a metric")
968 0 : });
969 :
970 : /// How long did we take to start up? Broken down by labels to describe
971 : /// different phases of startup.
972 0 : pub static STARTUP_DURATION: Lazy<GaugeVec> = Lazy::new(|| {
973 0 : register_gauge_vec!(
974 0 : "pageserver_startup_duration_seconds",
975 0 : "Time taken by phases of pageserver startup, in seconds",
976 0 : &["phase"]
977 0 : )
978 0 : .expect("Failed to register pageserver_startup_duration_seconds metric")
979 0 : });
980 :
981 0 : pub static STARTUP_IS_LOADING: Lazy<UIntGauge> = Lazy::new(|| {
982 0 : register_uint_gauge!(
983 0 : "pageserver_startup_is_loading",
984 0 : "1 while in initial startup load of tenants, 0 at other times"
985 0 : )
986 0 : .expect("Failed to register pageserver_startup_is_loading")
987 0 : });
988 :
989 408 : pub(crate) static TIMELINE_EPHEMERAL_BYTES: Lazy<UIntGauge> = Lazy::new(|| {
990 408 : register_uint_gauge!(
991 408 : "pageserver_timeline_ephemeral_bytes",
992 408 : "Total number of bytes in ephemeral layers, summed for all timelines. Approximate, lazily updated."
993 408 : )
994 408 : .expect("Failed to register metric")
995 408 : });
996 :
997 : /// Metrics related to the lifecycle of a [`crate::tenant::Tenant`] object: things
998 : /// like how long it took to load.
999 : ///
1000 : /// Note that these are process-global metrics, _not_ per-tenant metrics. Per-tenant
1001 : /// metrics are rather expensive, and usually fine grained stuff makes more sense
1002 : /// at a timeline level than tenant level.
1003 : pub(crate) struct TenantMetrics {
1004 : /// How long did tenants take to go from construction to active state?
1005 : pub(crate) activation: Histogram,
1006 : pub(crate) preload: Histogram,
1007 : pub(crate) attach: Histogram,
1008 :
1009 : /// How many tenants are included in the initial startup of the pageserver?
1010 : pub(crate) startup_scheduled: IntCounter,
1011 : pub(crate) startup_complete: IntCounter,
1012 : }
1013 :
1014 0 : pub(crate) static TENANT: Lazy<TenantMetrics> = Lazy::new(|| {
1015 0 : TenantMetrics {
1016 0 : activation: register_histogram!(
1017 0 : "pageserver_tenant_activation_seconds",
1018 0 : "Time taken by tenants to activate, in seconds",
1019 0 : CRITICAL_OP_BUCKETS.into()
1020 0 : )
1021 0 : .expect("Failed to register metric"),
1022 0 : preload: register_histogram!(
1023 0 : "pageserver_tenant_preload_seconds",
1024 0 : "Time taken by tenants to load remote metadata on startup/attach, in seconds",
1025 0 : CRITICAL_OP_BUCKETS.into()
1026 0 : )
1027 0 : .expect("Failed to register metric"),
1028 0 : attach: register_histogram!(
1029 0 : "pageserver_tenant_attach_seconds",
1030 0 : "Time taken by tenants to intialize, after remote metadata is already loaded",
1031 0 : CRITICAL_OP_BUCKETS.into()
1032 0 : )
1033 0 : .expect("Failed to register metric"),
1034 0 : startup_scheduled: register_int_counter!(
1035 0 : "pageserver_tenant_startup_scheduled",
1036 0 : "Number of tenants included in pageserver startup (doesn't count tenants attached later)"
1037 0 : ).expect("Failed to register metric"),
1038 0 : startup_complete: register_int_counter!(
1039 0 : "pageserver_tenant_startup_complete",
1040 0 : "Number of tenants that have completed warm-up, or activated on-demand during initial startup: \
1041 0 : should eventually reach `pageserver_tenant_startup_scheduled_total`. Does not include broken \
1042 0 : tenants: such cases will lead to this metric never reaching the scheduled count."
1043 0 : ).expect("Failed to register metric"),
1044 0 : }
1045 0 : });
1046 :
1047 : /// Each `Timeline`'s [`EVICTIONS_WITH_LOW_RESIDENCE_DURATION`] metric.
1048 : #[derive(Debug)]
1049 : pub(crate) struct EvictionsWithLowResidenceDuration {
1050 : data_source: &'static str,
1051 : threshold: Duration,
1052 : counter: Option<IntCounter>,
1053 : }
1054 :
1055 : pub(crate) struct EvictionsWithLowResidenceDurationBuilder {
1056 : data_source: &'static str,
1057 : threshold: Duration,
1058 : }
1059 :
1060 : impl EvictionsWithLowResidenceDurationBuilder {
1061 920 : pub fn new(data_source: &'static str, threshold: Duration) -> Self {
1062 920 : Self {
1063 920 : data_source,
1064 920 : threshold,
1065 920 : }
1066 920 : }
1067 :
1068 920 : fn build(
1069 920 : &self,
1070 920 : tenant_id: &str,
1071 920 : shard_id: &str,
1072 920 : timeline_id: &str,
1073 920 : ) -> EvictionsWithLowResidenceDuration {
1074 920 : let counter = EVICTIONS_WITH_LOW_RESIDENCE_DURATION
1075 920 : .get_metric_with_label_values(&[
1076 920 : tenant_id,
1077 920 : shard_id,
1078 920 : timeline_id,
1079 920 : self.data_source,
1080 920 : &EvictionsWithLowResidenceDuration::threshold_label_value(self.threshold),
1081 920 : ])
1082 920 : .unwrap();
1083 920 : EvictionsWithLowResidenceDuration {
1084 920 : data_source: self.data_source,
1085 920 : threshold: self.threshold,
1086 920 : counter: Some(counter),
1087 920 : }
1088 920 : }
1089 : }
1090 :
1091 : impl EvictionsWithLowResidenceDuration {
1092 940 : fn threshold_label_value(threshold: Duration) -> String {
1093 940 : format!("{}", threshold.as_secs())
1094 940 : }
1095 :
1096 8 : pub fn observe(&self, observed_value: Duration) {
1097 8 : if observed_value < self.threshold {
1098 8 : self.counter
1099 8 : .as_ref()
1100 8 : .expect("nobody calls this function after `remove_from_vec`")
1101 8 : .inc();
1102 8 : }
1103 8 : }
1104 :
1105 0 : pub fn change_threshold(
1106 0 : &mut self,
1107 0 : tenant_id: &str,
1108 0 : shard_id: &str,
1109 0 : timeline_id: &str,
1110 0 : new_threshold: Duration,
1111 0 : ) {
1112 0 : if new_threshold == self.threshold {
1113 0 : return;
1114 0 : }
1115 0 : let mut with_new = EvictionsWithLowResidenceDurationBuilder::new(
1116 0 : self.data_source,
1117 0 : new_threshold,
1118 0 : )
1119 0 : .build(tenant_id, shard_id, timeline_id);
1120 0 : std::mem::swap(self, &mut with_new);
1121 0 : with_new.remove(tenant_id, shard_id, timeline_id);
1122 0 : }
1123 :
1124 : // This could be a `Drop` impl, but we need the `tenant_id` and `timeline_id`.
1125 20 : fn remove(&mut self, tenant_id: &str, shard_id: &str, timeline_id: &str) {
1126 20 : let Some(_counter) = self.counter.take() else {
1127 0 : return;
1128 : };
1129 :
1130 20 : let threshold = Self::threshold_label_value(self.threshold);
1131 20 :
1132 20 : let removed = EVICTIONS_WITH_LOW_RESIDENCE_DURATION.remove_label_values(&[
1133 20 : tenant_id,
1134 20 : shard_id,
1135 20 : timeline_id,
1136 20 : self.data_source,
1137 20 : &threshold,
1138 20 : ]);
1139 20 :
1140 20 : match removed {
1141 0 : Err(e) => {
1142 0 : // this has been hit in staging as
1143 0 : // <https://neondatabase.sentry.io/issues/4142396994/>, but we don't know how.
1144 0 : // because we can be in the drop path already, don't risk:
1145 0 : // - "double-panic => illegal instruction" or
1146 0 : // - future "drop panick => abort"
1147 0 : //
1148 0 : // so just nag: (the error has the labels)
1149 0 : tracing::warn!(
1150 0 : "failed to remove EvictionsWithLowResidenceDuration, it was already removed? {e:#?}"
1151 : );
1152 : }
1153 : Ok(()) => {
1154 : // to help identify cases where we double-remove the same values, let's log all
1155 : // deletions?
1156 20 : tracing::info!(
1157 0 : "removed EvictionsWithLowResidenceDuration with {tenant_id}, {timeline_id}, {}, {threshold}",
1158 : self.data_source
1159 : );
1160 : }
1161 : }
1162 20 : }
1163 : }
1164 :
1165 : // Metrics collected on disk IO operations
1166 : //
1167 : // Roughly logarithmic scale.
1168 : const STORAGE_IO_TIME_BUCKETS: &[f64] = &[
1169 : 0.000030, // 30 usec
1170 : 0.001000, // 1000 usec
1171 : 0.030, // 30 ms
1172 : 1.000, // 1000 ms
1173 : 30.000, // 30000 ms
1174 : ];
1175 :
1176 : /// VirtualFile fs operation variants.
1177 : ///
1178 : /// Operations:
1179 : /// - open ([`std::fs::OpenOptions::open`])
1180 : /// - close (dropping [`crate::virtual_file::VirtualFile`])
1181 : /// - close-by-replace (close by replacement algorithm)
1182 : /// - read (`read_at`)
1183 : /// - write (`write_at`)
1184 : /// - seek (modify internal position or file length query)
1185 : /// - fsync ([`std::fs::File::sync_all`])
1186 : /// - metadata ([`std::fs::File::metadata`])
1187 : #[derive(
1188 0 : Debug, Clone, Copy, strum_macros::EnumCount, strum_macros::EnumIter, strum_macros::FromRepr,
1189 : )]
1190 : pub(crate) enum StorageIoOperation {
1191 : Open,
1192 : OpenAfterReplace,
1193 : Close,
1194 : CloseByReplace,
1195 : Read,
1196 : Write,
1197 : Seek,
1198 : Fsync,
1199 : Metadata,
1200 : }
1201 :
1202 : impl StorageIoOperation {
1203 4284 : pub fn as_str(&self) -> &'static str {
1204 4284 : match self {
1205 476 : StorageIoOperation::Open => "open",
1206 476 : StorageIoOperation::OpenAfterReplace => "open-after-replace",
1207 476 : StorageIoOperation::Close => "close",
1208 476 : StorageIoOperation::CloseByReplace => "close-by-replace",
1209 476 : StorageIoOperation::Read => "read",
1210 476 : StorageIoOperation::Write => "write",
1211 476 : StorageIoOperation::Seek => "seek",
1212 476 : StorageIoOperation::Fsync => "fsync",
1213 476 : StorageIoOperation::Metadata => "metadata",
1214 : }
1215 4284 : }
1216 : }
1217 :
1218 : /// Tracks time taken by fs operations near VirtualFile.
1219 : #[derive(Debug)]
1220 : pub(crate) struct StorageIoTime {
1221 : metrics: [Histogram; StorageIoOperation::COUNT],
1222 : }
1223 :
1224 : impl StorageIoTime {
1225 476 : fn new() -> Self {
1226 476 : let storage_io_histogram_vec = register_histogram_vec!(
1227 476 : "pageserver_io_operations_seconds",
1228 476 : "Time spent in IO operations",
1229 476 : &["operation"],
1230 476 : STORAGE_IO_TIME_BUCKETS.into()
1231 476 : )
1232 476 : .expect("failed to define a metric");
1233 4284 : let metrics = std::array::from_fn(|i| {
1234 4284 : let op = StorageIoOperation::from_repr(i).unwrap();
1235 4284 : storage_io_histogram_vec
1236 4284 : .get_metric_with_label_values(&[op.as_str()])
1237 4284 : .unwrap()
1238 4284 : });
1239 476 : Self { metrics }
1240 476 : }
1241 :
1242 4026414 : pub(crate) fn get(&self, op: StorageIoOperation) -> &Histogram {
1243 4026414 : &self.metrics[op as usize]
1244 4026414 : }
1245 : }
1246 :
1247 : pub(crate) static STORAGE_IO_TIME_METRIC: Lazy<StorageIoTime> = Lazy::new(StorageIoTime::new);
1248 :
1249 : #[derive(Clone, Copy)]
1250 : #[repr(usize)]
1251 : enum StorageIoSizeOperation {
1252 : Read,
1253 : Write,
1254 : }
1255 :
1256 : impl StorageIoSizeOperation {
1257 : const VARIANTS: &'static [&'static str] = &["read", "write"];
1258 :
1259 2944 : fn as_str(&self) -> &'static str {
1260 2944 : Self::VARIANTS[*self as usize]
1261 2944 : }
1262 : }
1263 :
1264 : // Needed for the https://neonprod.grafana.net/d/5uK9tHL4k/picking-tenant-for-relocation?orgId=1
1265 552 : static STORAGE_IO_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
1266 552 : register_uint_gauge_vec!(
1267 552 : "pageserver_io_operations_bytes_total",
1268 552 : "Total amount of bytes read/written in IO operations",
1269 552 : &["operation", "tenant_id", "shard_id", "timeline_id"]
1270 552 : )
1271 552 : .expect("failed to define a metric")
1272 552 : });
1273 :
1274 : #[derive(Clone, Debug)]
1275 : pub(crate) struct StorageIoSizeMetrics {
1276 : pub read: UIntGauge,
1277 : pub write: UIntGauge,
1278 : }
1279 :
1280 : impl StorageIoSizeMetrics {
1281 1472 : pub(crate) fn new(tenant_id: &str, shard_id: &str, timeline_id: &str) -> Self {
1282 1472 : let read = STORAGE_IO_SIZE
1283 1472 : .get_metric_with_label_values(&[
1284 1472 : StorageIoSizeOperation::Read.as_str(),
1285 1472 : tenant_id,
1286 1472 : shard_id,
1287 1472 : timeline_id,
1288 1472 : ])
1289 1472 : .unwrap();
1290 1472 : let write = STORAGE_IO_SIZE
1291 1472 : .get_metric_with_label_values(&[
1292 1472 : StorageIoSizeOperation::Write.as_str(),
1293 1472 : tenant_id,
1294 1472 : shard_id,
1295 1472 : timeline_id,
1296 1472 : ])
1297 1472 : .unwrap();
1298 1472 : Self { read, write }
1299 1472 : }
1300 : }
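// Example (illustrative; variable names are hypothetical and this assumes the usual gauge
// `add` API): per-timeline byte accounting with the read/write gauges above.
// ```
// let io_size = StorageIoSizeMetrics::new(&tenant_id, &shard_id, &timeline_id);
// io_size.read.add(bytes_read as u64);
// io_size.write.add(bytes_written as u64);
// ```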
1301 :
1302 : #[cfg(not(test))]
1303 : pub(crate) mod virtual_file_descriptor_cache {
1304 : use super::*;
1305 :
1306 0 : pub(crate) static SIZE_MAX: Lazy<UIntGauge> = Lazy::new(|| {
1307 0 : register_uint_gauge!(
1308 0 : "pageserver_virtual_file_descriptor_cache_size_max",
1309 0 : "Maximum number of open file descriptors in the cache."
1310 0 : )
1311 0 : .unwrap()
1312 0 : });
1313 :
1314 : // SIZE_CURRENT: derive it like so:
1315 : // ```
1316 : // sum(pageserver_io_operations_seconds_count{operation=~"^(open|open-after-replace)$"})
1317 : // -ignoring(operation)
1318 : // sum(pageserver_io_operations_seconds_count{operation=~"^(close|close-by-replace)$"})
1319 : // ```
1320 : }
1321 :
1322 : #[cfg(not(test))]
1323 : pub(crate) mod virtual_file_io_engine {
1324 : use super::*;
1325 :
1326 0 : pub(crate) static KIND: Lazy<UIntGaugeVec> = Lazy::new(|| {
1327 0 : register_uint_gauge_vec!(
1328 0 : "pageserver_virtual_file_io_engine_kind",
1329 0 : "The configured io engine for VirtualFile",
1330 0 : &["kind"],
1331 0 : )
1332 0 : .unwrap()
1333 0 : });
1334 : }
1335 :
1336 : pub(crate) struct SmgrOpTimer(Option<SmgrOpTimerInner>);
1337 : pub(crate) struct SmgrOpTimerInner {
1338 : global_execution_latency_histo: Histogram,
1339 : per_timeline_execution_latency_histo: Option<Histogram>,
1340 :
1341 : global_batch_wait_time: Histogram,
1342 : per_timeline_batch_wait_time: Histogram,
1343 :
1344 : global_flush_in_progress_micros: IntCounter,
1345 : per_timeline_flush_in_progress_micros: IntCounter,
1346 :
1347 : throttling: Arc<tenant_throttling::Pagestream>,
1348 :
1349 : timings: SmgrOpTimerState,
1350 : }
1351 :
1352 : /// The stages of request processing are represented by the enum variants.
1353 : /// Used as part of [`SmgrOpTimerInner::timings`].
1354 : ///
1355 : /// Request processing calls into the `SmgrOpTimer::observe_*` methods at the
1356 : /// transition points.
1357 : /// These methods bump relevant counters and then update [`SmgrOpTimerInner::timings`]
1358 : /// to the next state.
1359 : ///
1360 : /// Each request goes through every stage, in all configurations.
1361 : ///
1362 : #[derive(Debug)]
1363 : enum SmgrOpTimerState {
1364 : Received {
1365 : // In the future, we may want to track the full time the request spent
1366 : // inside pageserver process (time spent in kernel buffers can't be tracked).
1367 : // `received_at` would be used for that.
1368 : #[allow(dead_code)]
1369 : received_at: Instant,
1370 : },
1371 : Throttling {
1372 : throttle_started_at: Instant,
1373 : },
1374 : Batching {
1375 : throttle_done_at: Instant,
1376 : },
1377 : Executing {
1378 : execution_started_at: Instant,
1379 : },
1380 : Flushing,
1381 : // NB: when adding observation points, remember to update the Drop impl.
1382 : }
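// A rough sketch of the observation sequence a request handler drives through the states
// above (the timer creation, throttle call, and handler shape are illustrative only):
// ```
// let mut timer: SmgrOpTimer = /* created when the request is received */;
// timer.observe_throttle_start(Instant::now());
// let throttle: ThrottleResult = /* wait on the pagestream throttle */;
// timer.observe_throttle_done(throttle);
// timer.observe_execution_start(Instant::now());
// // ... execute the request ...
// if let Some(flush) = timer.observe_execution_end(Instant::now()) {
//     // measure() keeps bumping the flush-in-progress counters while flushing.
//     flush.measure(Instant::now(), flush_future, socket_fd).await;
// }
// ```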
1383 :
1384 : // NB: when adding observation points, remember to update the Drop impl.
1385 : impl SmgrOpTimer {
1386 : /// See [`SmgrOpTimerState`] for more context.
1387 0 : pub(crate) fn observe_throttle_start(&mut self, at: Instant) {
1388 0 : let Some(inner) = self.0.as_mut() else {
1389 0 : return;
1390 : };
1391 0 : let SmgrOpTimerState::Received { received_at: _ } = &mut inner.timings else {
1392 0 : return;
1393 : };
1394 0 : inner.throttling.count_accounted_start.inc();
1395 0 : inner.timings = SmgrOpTimerState::Throttling {
1396 0 : throttle_started_at: at,
1397 0 : };
1398 0 : }
1399 :
1400 : /// See [`SmgrOpTimerState`] for more context.
1401 0 : pub(crate) fn observe_throttle_done(&mut self, throttle: ThrottleResult) {
1402 0 : let Some(inner) = self.0.as_mut() else {
1403 0 : return;
1404 : };
1405 : let SmgrOpTimerState::Throttling {
1406 0 : throttle_started_at,
1407 0 : } = &inner.timings
1408 : else {
1409 0 : return;
1410 : };
1411 0 : inner.throttling.count_accounted_finish.inc();
1412 0 : match throttle {
1413 0 : ThrottleResult::NotThrottled { end } => {
1414 0 : inner.timings = SmgrOpTimerState::Batching {
1415 0 : throttle_done_at: end,
1416 0 : };
1417 0 : }
1418 0 : ThrottleResult::Throttled { end } => {
1419 0 : // update metrics
1420 0 : inner.throttling.count_throttled.inc();
1421 0 : inner
1422 0 : .throttling
1423 0 : .wait_time
1424 0 : .inc_by((end - *throttle_started_at).as_micros().try_into().unwrap());
1425 0 : // state transition
1426 0 : inner.timings = SmgrOpTimerState::Batching {
1427 0 : throttle_done_at: end,
1428 0 : };
1429 0 : }
1430 : }
1431 0 : }
1432 :
1433 : /// See [`SmgrOpTimerState`] for more context.
1434 0 : pub(crate) fn observe_execution_start(&mut self, at: Instant) {
1435 0 : let Some(inner) = self.0.as_mut() else {
1436 0 : return;
1437 : };
1438 0 : let SmgrOpTimerState::Batching { throttle_done_at } = &inner.timings else {
1439 0 : return;
1440 : };
1441 : // update metrics
1442 0 : let batch = at - *throttle_done_at;
1443 0 : inner.global_batch_wait_time.observe(batch.as_secs_f64());
1444 0 : inner
1445 0 : .per_timeline_batch_wait_time
1446 0 : .observe(batch.as_secs_f64());
1447 0 : // state transition
1448 0 : inner.timings = SmgrOpTimerState::Executing {
1449 0 : execution_started_at: at,
1450 0 : }
1451 0 : }
1452 :
1453 : /// For all but the first caller, this is a no-op.
1454 : /// The first callers receives Some, subsequent ones None.
1455 : ///
1456 : /// See [`SmgrOpTimerState`] for more context.
1457 0 : pub(crate) fn observe_execution_end(&mut self, at: Instant) -> Option<SmgrOpFlushInProgress> {
1458 : // NB: unlike the other observe_* methods, this one take()s.
1459 : #[allow(clippy::question_mark)] // maintain similar code pattern.
1460 0 : let Some(mut inner) = self.0.take() else {
1461 0 : return None;
1462 : };
1463 : let SmgrOpTimerState::Executing {
1464 0 : execution_started_at,
1465 0 : } = &inner.timings
1466 : else {
1467 0 : return None;
1468 : };
1469 : // update metrics
1470 0 : let execution = at - *execution_started_at;
1471 0 : inner
1472 0 : .global_execution_latency_histo
1473 0 : .observe(execution.as_secs_f64());
1474 0 : if let Some(per_timeline_execution_latency_histo) =
1475 0 : &inner.per_timeline_execution_latency_histo
1476 0 : {
1477 0 : per_timeline_execution_latency_histo.observe(execution.as_secs_f64());
1478 0 : }
1479 :
1480 : // state transition
1481 0 : inner.timings = SmgrOpTimerState::Flushing;
1482 0 :
1483 0 : // return the flush in progress object which
1484 0 : // will do the remaining metrics updates
1485 0 : let SmgrOpTimerInner {
1486 0 : global_flush_in_progress_micros,
1487 0 : per_timeline_flush_in_progress_micros,
1488 0 : ..
1489 0 : } = inner;
1490 0 : Some(SmgrOpFlushInProgress {
1491 0 : global_micros: global_flush_in_progress_micros,
1492 0 : per_timeline_micros: per_timeline_flush_in_progress_micros,
1493 0 : })
1494 0 : }
1495 : }
1496 :
1497 : /// The last stage of request processing is serializing and flushing the request
1498 : /// into the TCP connection. We want to make slow flushes observable
1499 : /// _while they are occuring_, so this struct provides a wrapper method [`Self::measure`]
1500 : /// to periodically bump the metric.
1501 : ///
1502 : /// If in the future we decide that we're not interested in live updates, we can
1503 : /// add another `observe_*` method to [`SmgrOpTimer`], follow the existing pattern there,
1504 : /// and remove this struct from the code base.
1505 : pub(crate) struct SmgrOpFlushInProgress {
1506 : global_micros: IntCounter,
1507 : per_timeline_micros: IntCounter,
1508 : }
1509 :
1510 : impl Drop for SmgrOpTimer {
1511 0 : fn drop(&mut self) {
1512 0 : // In case of early drop, update any of the remaining metrics with
1513 0 : // observations so that (started,finished) counter pairs balance out
1514 0 : // and all counters on the latency path have the same number of
1515 0 : // observations.
1516 0 : // It's technically lying and it would be better if each metric had
1517 0 : // a separate label or similar for cancelled requests.
1518 0 : // But we don't have that right now and counter pairs balancing
1519 0 : // out is useful when using the metrics in panels and whatnot.
1520 0 : let now = Instant::now();
1521 0 : self.observe_throttle_start(now);
1522 0 : self.observe_throttle_done(ThrottleResult::NotThrottled { end: now });
1523 0 : self.observe_execution_start(now);
1524 0 : let maybe_flush_timer = self.observe_execution_end(now);
1525 0 : drop(maybe_flush_timer);
1526 0 : }
1527 : }
1528 :
1529 : impl SmgrOpFlushInProgress {
1530 : /// The caller must guarantee that `socket_fd` outlives this function.
1531 0 : pub(crate) async fn measure<Fut, O>(
1532 0 : self,
1533 0 : started_at: Instant,
1534 0 : mut fut: Fut,
1535 0 : socket_fd: RawFd,
1536 0 : ) -> O
1537 0 : where
1538 0 : Fut: std::future::Future<Output = O>,
1539 0 : {
1540 0 : let mut fut = std::pin::pin!(fut);
1541 0 :
1542 0 : let mut logged = false;
1543 0 : let mut last_counter_increment_at = started_at;
1544 0 : let mut observe_guard = scopeguard::guard(
1545 0 : |is_timeout| {
1546 0 : let now = Instant::now();
1547 0 :
1548 0 : // Increment counter
1549 0 : {
1550 0 : let elapsed_since_last_observe = now - last_counter_increment_at;
1551 0 : self.global_micros
1552 0 : .inc_by(u64::try_from(elapsed_since_last_observe.as_micros()).unwrap());
1553 0 : self.per_timeline_micros
1554 0 : .inc_by(u64::try_from(elapsed_since_last_observe.as_micros()).unwrap());
1555 0 : last_counter_increment_at = now;
1556 0 : }
1557 0 :
1558 0 : // Log on every timeout, and on completion, but only if we hit at least one timeout.
1559 0 : if is_timeout || logged {
1560 0 : logged = true;
1561 0 : let elapsed_total = now - started_at;
1562 0 : let msg = if is_timeout {
1563 0 : "slow flush ongoing"
1564 : } else {
1565 0 : "slow flush completed or cancelled"
1566 : };
1567 :
1568 0 : let (inq, outq) = {
1569 0 : // SAFETY: caller guarantees that `socket_fd` outlives this function.
1570 0 : #[cfg(target_os = "linux")]
1571 0 : unsafe {
1572 0 : (
1573 0 : utils::linux_socket_ioctl::inq(socket_fd).unwrap_or(-2),
1574 0 : utils::linux_socket_ioctl::outq(socket_fd).unwrap_or(-2),
1575 0 : )
1576 0 : }
1577 0 : #[cfg(not(target_os = "linux"))]
1578 0 : {
1579 0 : _ = socket_fd; // appease unused lint on macOS
1580 0 : (-1, -1)
1581 0 : }
1582 0 : };
1583 0 :
1584 0 : let elapsed_total_secs = format!("{:.6}", elapsed_total.as_secs_f64());
1585 0 : tracing::info!(elapsed_total_secs, inq, outq, msg);
1586 0 : }
1587 0 : },
1588 0 : |mut observe| {
1589 0 : observe(false);
1590 0 : },
1591 0 : );
1592 :
1593 : loop {
1594 0 : match tokio::time::timeout(Duration::from_secs(10), &mut fut).await {
1595 0 : Ok(v) => return v,
1596 0 : Err(_timeout) => {
1597 0 : (*observe_guard)(true);
1598 0 : }
1599 : }
1600 : }
1601 0 : }
1602 : }
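// A minimal usage sketch (not part of the measured source; `pgb.flush()` and
// `socket_fd` are assumptions about the call site): after the execution phase,
// the flush future would be wrapped so that a slow flush keeps bumping the
// in-progress counters every 10 seconds while it is still running:
//
//     if let Some(flush_timer) = timer.observe_execution_end(Instant::now()) {
//         flush_timer
//             .measure(Instant::now(), pgb.flush(), socket_fd)
//             .await?;
//     }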
1603 :
1604 : #[derive(
1605 : Debug,
1606 : Clone,
1607 : Copy,
1608 : IntoStaticStr,
1609 : strum_macros::EnumCount,
1610 0 : strum_macros::EnumIter,
1611 : strum_macros::FromRepr,
1612 : enum_map::Enum,
1613 : )]
1614 : #[strum(serialize_all = "snake_case")]
1615 : pub enum SmgrQueryType {
1616 : GetRelExists,
1617 : GetRelSize,
1618 : GetPageAtLsn,
1619 : GetDbSize,
1620 : GetSlruSegment,
1621 : #[cfg(feature = "testing")]
1622 : Test,
1623 : }
1624 :
1625 : pub(crate) struct SmgrQueryTimePerTimeline {
1626 : global_started: [IntCounter; SmgrQueryType::COUNT],
1627 : global_latency: [Histogram; SmgrQueryType::COUNT],
1628 : per_timeline_getpage_started: IntCounter,
1629 : per_timeline_getpage_latency: Histogram,
1630 : global_batch_size: Histogram,
1631 : per_timeline_batch_size: Histogram,
1632 : global_flush_in_progress_micros: IntCounter,
1633 : per_timeline_flush_in_progress_micros: IntCounter,
1634 : global_batch_wait_time: Histogram,
1635 : per_timeline_batch_wait_time: Histogram,
1636 : throttling: Arc<tenant_throttling::Pagestream>,
1637 : }
1638 :
1639 416 : static SMGR_QUERY_STARTED_GLOBAL: Lazy<IntCounterVec> = Lazy::new(|| {
1640 416 : register_int_counter_vec!(
1641 416 : // it's a counter, but the name leaves room to extend it to a histogram of queue depth
1642 416 : "pageserver_smgr_query_started_global_count",
1643 416 : "Number of smgr queries started, aggregated by query type.",
1644 416 : &["smgr_query_type"],
1645 416 : )
1646 416 : .expect("failed to define a metric")
1647 416 : });
1648 :
1649 416 : static SMGR_QUERY_STARTED_PER_TENANT_TIMELINE: Lazy<IntCounterVec> = Lazy::new(|| {
1650 416 : register_int_counter_vec!(
1651 416 : // it's a counter, but the name leaves room to extend it to a histogram of queue depth
1652 416 : "pageserver_smgr_query_started_count",
1653 416 : "Number of smgr queries started, aggregated by query type and tenant/timeline.",
1654 416 : &["smgr_query_type", "tenant_id", "shard_id", "timeline_id"],
1655 416 : )
1656 416 : .expect("failed to define a metric")
1657 416 : });
1658 :
1659 : // Alias so all histograms recording per-timeline smgr timings use the same buckets.
1660 : static SMGR_QUERY_TIME_PER_TENANT_TIMELINE_BUCKETS: &[f64] = CRITICAL_OP_BUCKETS;
1661 :
1662 416 : static SMGR_QUERY_TIME_PER_TENANT_TIMELINE: Lazy<HistogramVec> = Lazy::new(|| {
1663 416 : register_histogram_vec!(
1664 416 : "pageserver_smgr_query_seconds",
1665 416 : "Time spent _executing_ smgr query handling, excluding batch and throttle delays.",
1666 416 : &["smgr_query_type", "tenant_id", "shard_id", "timeline_id"],
1667 416 : SMGR_QUERY_TIME_PER_TENANT_TIMELINE_BUCKETS.into(),
1668 416 : )
1669 416 : .expect("failed to define a metric")
1670 416 : });
1671 :
1672 416 : static SMGR_QUERY_TIME_GLOBAL_BUCKETS: Lazy<Vec<f64>> = Lazy::new(|| {
1673 416 : [
1674 416 : 1,
1675 416 : 10,
1676 416 : 20,
1677 416 : 40,
1678 416 : 60,
1679 416 : 80,
1680 416 : 100,
1681 416 : 200,
1682 416 : 300,
1683 416 : 400,
1684 416 : 500,
1685 416 : 600,
1686 416 : 700,
1687 416 : 800,
1688 416 : 900,
1689 416 : 1_000, // 1ms
1690 416 : 2_000,
1691 416 : 4_000,
1692 416 : 6_000,
1693 416 : 8_000,
1694 416 : 10_000, // 10ms
1695 416 : 20_000,
1696 416 : 40_000,
1697 416 : 60_000,
1698 416 : 80_000,
1699 416 : 100_000,
1700 416 : 200_000,
1701 416 : 400_000,
1702 416 : 600_000,
1703 416 : 800_000,
1704 416 : 1_000_000, // 1s
1705 416 : 2_000_000,
1706 416 : 4_000_000,
1707 416 : 6_000_000,
1708 416 : 8_000_000,
1709 416 : 10_000_000, // 10s
1710 416 : 20_000_000,
1711 416 : 50_000_000,
1712 416 : 100_000_000,
1713 416 : 200_000_000,
1714 416 : 1_000_000_000, // 1000s
1715 416 : ]
1716 416 : .into_iter()
1717 416 : .map(Duration::from_micros)
1718 17056 : .map(|d| d.as_secs_f64())
1719 416 : .collect()
1720 416 : });
1721 :
1722 416 : static SMGR_QUERY_TIME_GLOBAL: Lazy<HistogramVec> = Lazy::new(|| {
1723 416 : register_histogram_vec!(
1724 416 : "pageserver_smgr_query_seconds_global",
1725 416 : "Like pageserver_smgr_query_seconds, but aggregated to instance level.",
1726 416 : &["smgr_query_type"],
1727 416 : SMGR_QUERY_TIME_GLOBAL_BUCKETS.clone(),
1728 416 : )
1729 416 : .expect("failed to define a metric")
1730 416 : });
1731 :
1732 416 : static PAGE_SERVICE_BATCH_SIZE_BUCKETS_GLOBAL: Lazy<Vec<f64>> = Lazy::new(|| {
1733 416 : (1..=u32::try_from(Timeline::MAX_GET_VECTORED_KEYS).unwrap())
1734 13312 : .map(|v| v.into())
1735 416 : .collect()
1736 416 : });
1737 :
1738 416 : static PAGE_SERVICE_BATCH_SIZE_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
1739 416 : register_histogram!(
1740 416 : "pageserver_page_service_batch_size_global",
1741 416 : "Batch size of pageserver page service requests",
1742 416 : PAGE_SERVICE_BATCH_SIZE_BUCKETS_GLOBAL.clone(),
1743 416 : )
1744 416 : .expect("failed to define a metric")
1745 416 : });
1746 :
1747 416 : static PAGE_SERVICE_BATCH_SIZE_BUCKETS_PER_TIMELINE: Lazy<Vec<f64>> = Lazy::new(|| {
1748 416 : let mut buckets = Vec::new();
1749 2912 : for i in 0.. {
1750 2912 : let bucket = 1 << i;
1751 2912 : if bucket > u32::try_from(Timeline::MAX_GET_VECTORED_KEYS).unwrap() {
1752 416 : break;
1753 2496 : }
1754 2496 : buckets.push(bucket.into());
1755 : }
1756 416 : buckets
1757 416 : });
1758 :
1759 416 : static PAGE_SERVICE_BATCH_SIZE_PER_TENANT_TIMELINE: Lazy<HistogramVec> = Lazy::new(|| {
1760 416 : register_histogram_vec!(
1761 416 : "pageserver_page_service_batch_size",
1762 416 : "Batch size of pageserver page service requests",
1763 416 : &["tenant_id", "shard_id", "timeline_id"],
1764 416 : PAGE_SERVICE_BATCH_SIZE_BUCKETS_PER_TIMELINE.clone()
1765 416 : )
1766 416 : .expect("failed to define a metric")
1767 416 : });
1768 :
1769 0 : pub(crate) static PAGE_SERVICE_CONFIG_MAX_BATCH_SIZE: Lazy<IntGaugeVec> = Lazy::new(|| {
1770 0 : register_int_gauge_vec!(
1771 0 : "pageserver_page_service_config_max_batch_size",
1772 0 : "Configured maximum batch size for the server-side batching functionality of page_service. \
1773 0 : Labels expose more of the configuration parameters.",
1774 0 : &["mode", "execution"]
1775 0 : )
1776 0 : .expect("failed to define a metric")
1777 0 : });
1778 :
1779 0 : fn set_page_service_config_max_batch_size(conf: &PageServicePipeliningConfig) {
1780 0 : PAGE_SERVICE_CONFIG_MAX_BATCH_SIZE.reset();
1781 0 : let (label_values, value) = match conf {
1782 0 : PageServicePipeliningConfig::Serial => (["serial", "-"], 1),
1783 : PageServicePipeliningConfig::Pipelined(PageServicePipeliningConfigPipelined {
1784 0 : max_batch_size,
1785 0 : execution,
1786 0 : }) => {
1787 0 : let mode = "pipelined";
1788 0 : let execution = match execution {
1789 : PageServiceProtocolPipelinedExecutionStrategy::ConcurrentFutures => {
1790 0 : "concurrent-futures"
1791 : }
1792 0 : PageServiceProtocolPipelinedExecutionStrategy::Tasks => "tasks",
1793 : };
1794 0 : ([mode, execution], max_batch_size.get())
1795 : }
1796 : };
1797 0 : PAGE_SERVICE_CONFIG_MAX_BATCH_SIZE
1798 0 : .with_label_values(&label_values)
1799 0 : .set(value.try_into().unwrap());
1800 0 : }
1801 :
1802 416 : static PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS: Lazy<IntCounterVec> = Lazy::new(|| {
1803 416 : register_int_counter_vec!(
1804 416 : "pageserver_page_service_pagestream_flush_in_progress_micros",
1805 416 : "Counter that sums up the microseconds that a pagestream response was being flushed into the TCP connection. \
1806 416 : If the flush is particularly slow, this counter will be updated periodically to make slow flushes \
1807 416 : easily discoverable in monitoring. \
1808 416 : Hence, this is NOT a completion latency histogram.",
1809 416 : &["tenant_id", "shard_id", "timeline_id"],
1810 416 : )
1811 416 : .expect("failed to define a metric")
1812 416 : });
1813 :
1814 416 : static PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS_GLOBAL: Lazy<IntCounter> = Lazy::new(|| {
1815 416 : register_int_counter!(
1816 416 : "pageserver_page_service_pagestream_flush_in_progress_micros_global",
1817 416 : "Like pageserver_page_service_pagestream_flush_in_progress_seconds, but instance-wide.",
1818 416 : )
1819 416 : .expect("failed to define a metric")
1820 416 : });
1821 :
1822 416 : static PAGE_SERVICE_SMGR_BATCH_WAIT_TIME: Lazy<HistogramVec> = Lazy::new(|| {
1823 416 : register_histogram_vec!(
1824 416 : "pageserver_page_service_pagestream_batch_wait_time_seconds",
1825 416 : "Time a request spent waiting in its batch until the batch moved to throttle&execution.",
1826 416 : &["tenant_id", "shard_id", "timeline_id"],
1827 416 : SMGR_QUERY_TIME_PER_TENANT_TIMELINE_BUCKETS.into(),
1828 416 : )
1829 416 : .expect("failed to define a metric")
1830 416 : });
1831 :
1832 416 : static PAGE_SERVICE_SMGR_BATCH_WAIT_TIME_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
1833 416 : register_histogram!(
1834 416 : "pageserver_page_service_pagestream_batch_wait_time_seconds_global",
1835 416 : "Like pageserver_page_service_pagestream_batch_wait_time_seconds, but aggregated to instance level.",
1836 416 : SMGR_QUERY_TIME_GLOBAL_BUCKETS.to_vec(),
1837 416 : )
1838 416 : .expect("failed to define a metric")
1839 416 : });
1840 :
1841 : impl SmgrQueryTimePerTimeline {
1842 920 : pub(crate) fn new(
1843 920 : tenant_shard_id: &TenantShardId,
1844 920 : timeline_id: &TimelineId,
1845 920 : pagestream_throttle_metrics: Arc<tenant_throttling::Pagestream>,
1846 920 : ) -> Self {
1847 920 : let tenant_id = tenant_shard_id.tenant_id.to_string();
1848 920 : let shard_slug = format!("{}", tenant_shard_id.shard_slug());
1849 920 : let timeline_id = timeline_id.to_string();
1850 5520 : let global_started = std::array::from_fn(|i| {
1851 5520 : let op = SmgrQueryType::from_repr(i).unwrap();
1852 5520 : SMGR_QUERY_STARTED_GLOBAL
1853 5520 : .get_metric_with_label_values(&[op.into()])
1854 5520 : .unwrap()
1855 5520 : });
1856 5520 : let global_latency = std::array::from_fn(|i| {
1857 5520 : let op = SmgrQueryType::from_repr(i).unwrap();
1858 5520 : SMGR_QUERY_TIME_GLOBAL
1859 5520 : .get_metric_with_label_values(&[op.into()])
1860 5520 : .unwrap()
1861 5520 : });
1862 920 :
1863 920 : let per_timeline_getpage_started = SMGR_QUERY_STARTED_PER_TENANT_TIMELINE
1864 920 : .get_metric_with_label_values(&[
1865 920 : SmgrQueryType::GetPageAtLsn.into(),
1866 920 : &tenant_id,
1867 920 : &shard_slug,
1868 920 : &timeline_id,
1869 920 : ])
1870 920 : .unwrap();
1871 920 : let per_timeline_getpage_latency = SMGR_QUERY_TIME_PER_TENANT_TIMELINE
1872 920 : .get_metric_with_label_values(&[
1873 920 : SmgrQueryType::GetPageAtLsn.into(),
1874 920 : &tenant_id,
1875 920 : &shard_slug,
1876 920 : &timeline_id,
1877 920 : ])
1878 920 : .unwrap();
1879 920 :
1880 920 : let global_batch_size = PAGE_SERVICE_BATCH_SIZE_GLOBAL.clone();
1881 920 : let per_timeline_batch_size = PAGE_SERVICE_BATCH_SIZE_PER_TENANT_TIMELINE
1882 920 : .get_metric_with_label_values(&[&tenant_id, &shard_slug, &timeline_id])
1883 920 : .unwrap();
1884 920 :
1885 920 : let global_batch_wait_time = PAGE_SERVICE_SMGR_BATCH_WAIT_TIME_GLOBAL.clone();
1886 920 : let per_timeline_batch_wait_time = PAGE_SERVICE_SMGR_BATCH_WAIT_TIME
1887 920 : .get_metric_with_label_values(&[&tenant_id, &shard_slug, &timeline_id])
1888 920 : .unwrap();
1889 920 :
1890 920 : let global_flush_in_progress_micros =
1891 920 : PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS_GLOBAL.clone();
1892 920 : let per_timeline_flush_in_progress_micros = PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS
1893 920 : .get_metric_with_label_values(&[&tenant_id, &shard_slug, &timeline_id])
1894 920 : .unwrap();
1895 920 :
1896 920 : Self {
1897 920 : global_started,
1898 920 : global_latency,
1899 920 : per_timeline_getpage_latency,
1900 920 : per_timeline_getpage_started,
1901 920 : global_batch_size,
1902 920 : per_timeline_batch_size,
1903 920 : global_flush_in_progress_micros,
1904 920 : per_timeline_flush_in_progress_micros,
1905 920 : global_batch_wait_time,
1906 920 : per_timeline_batch_wait_time,
1907 920 : throttling: pagestream_throttle_metrics,
1908 920 : }
1909 920 : }
1910 0 : pub(crate) fn start_smgr_op(&self, op: SmgrQueryType, received_at: Instant) -> SmgrOpTimer {
1911 0 : self.global_started[op as usize].inc();
1912 :
1913 0 : let per_timeline_latency_histo = if matches!(op, SmgrQueryType::GetPageAtLsn) {
1914 0 : self.per_timeline_getpage_started.inc();
1915 0 : Some(self.per_timeline_getpage_latency.clone())
1916 : } else {
1917 0 : None
1918 : };
1919 :
1920 0 : SmgrOpTimer(Some(SmgrOpTimerInner {
1921 0 : global_execution_latency_histo: self.global_latency[op as usize].clone(),
1922 0 : per_timeline_execution_latency_histo: per_timeline_latency_histo,
1923 0 : global_flush_in_progress_micros: self.global_flush_in_progress_micros.clone(),
1924 0 : per_timeline_flush_in_progress_micros: self
1925 0 : .per_timeline_flush_in_progress_micros
1926 0 : .clone(),
1927 0 : global_batch_wait_time: self.global_batch_wait_time.clone(),
1928 0 : per_timeline_batch_wait_time: self.per_timeline_batch_wait_time.clone(),
1929 0 : throttling: self.throttling.clone(),
1930 0 : timings: SmgrOpTimerState::Received { received_at },
1931 0 : }))
1932 0 : }
1933 :
1934 : /// TODO: do something about this? seems odd, we have a similar call on SmgrOpTimer
1935 0 : pub(crate) fn observe_getpage_batch_start(&self, batch_size: usize) {
1936 0 : self.global_batch_size.observe(batch_size as f64);
1937 0 : self.per_timeline_batch_size.observe(batch_size as f64);
1938 0 : }
1939 : }
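// A minimal lifecycle sketch, assuming a hypothetical request handler that owns
// `metrics: &SmgrQueryTimePerTimeline`. The timer is driven through the states
// described in `SmgrOpTimerState`; stages skipped before an early drop are
// backfilled by the `Drop` impl above so counter pairs stay balanced:
//
//     let mut timer = metrics.start_smgr_op(SmgrQueryType::GetPageAtLsn, received_at);
//     timer.observe_throttle_start(Instant::now());
//     timer.observe_throttle_done(ThrottleResult::NotThrottled { end: Instant::now() });
//     timer.observe_execution_start(Instant::now());
//     // ... execute the request ...
//     let _maybe_flush_timer = timer.observe_execution_end(Instant::now());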
1940 :
1941 : // keep in sync with control plane Go code so that we can validate
1942 : // compute's basebackup_ms metric with our perspective in the context of SLI/SLO.
1943 0 : static COMPUTE_STARTUP_BUCKETS: Lazy<[f64; 28]> = Lazy::new(|| {
1944 0 : // Go code uses milliseconds. Variable is called `computeStartupBuckets`
1945 0 : [
1946 0 : 5, 10, 20, 30, 50, 70, 100, 120, 150, 200, 250, 300, 350, 400, 450, 500, 600, 800, 1000,
1947 0 : 1500, 2000, 2500, 3000, 5000, 10000, 20000, 40000, 60000,
1948 0 : ]
1949 0 : .map(|ms| (ms as f64) / 1000.0)
1950 0 : });
1951 :
1952 : pub(crate) struct BasebackupQueryTime {
1953 : ok: Histogram,
1954 : error: Histogram,
1955 : client_error: Histogram,
1956 : }
1957 :
1958 0 : pub(crate) static BASEBACKUP_QUERY_TIME: Lazy<BasebackupQueryTime> = Lazy::new(|| {
1959 0 : let vec = register_histogram_vec!(
1960 0 : "pageserver_basebackup_query_seconds",
1961 0 : "Histogram of basebackup queries durations, by result type",
1962 0 : &["result"],
1963 0 : COMPUTE_STARTUP_BUCKETS.to_vec(),
1964 0 : )
1965 0 : .expect("failed to define a metric");
1966 0 : BasebackupQueryTime {
1967 0 : ok: vec.get_metric_with_label_values(&["ok"]).unwrap(),
1968 0 : error: vec.get_metric_with_label_values(&["error"]).unwrap(),
1969 0 : client_error: vec.get_metric_with_label_values(&["client_error"]).unwrap(),
1970 0 : }
1971 0 : });
1972 :
1973 : pub(crate) struct BasebackupQueryTimeOngoingRecording<'a> {
1974 : parent: &'a BasebackupQueryTime,
1975 : start: std::time::Instant,
1976 : }
1977 :
1978 : impl BasebackupQueryTime {
1979 0 : pub(crate) fn start_recording(&self) -> BasebackupQueryTimeOngoingRecording<'_> {
1980 0 : let start = Instant::now();
1981 0 : BasebackupQueryTimeOngoingRecording {
1982 0 : parent: self,
1983 0 : start,
1984 0 : }
1985 0 : }
1986 : }
1987 :
1988 : impl BasebackupQueryTimeOngoingRecording<'_> {
1989 0 : pub(crate) fn observe<T>(self, res: &Result<T, QueryError>) {
1990 0 : let elapsed = self.start.elapsed().as_secs_f64();
1991 : // If you want to change the categorization of a specific error, also change it in `log_query_error`.
1992 0 : let metric = match res {
1993 0 : Ok(_) => &self.parent.ok,
1994 0 : Err(QueryError::Disconnected(ConnectionError::Io(io_error)))
1995 0 : if is_expected_io_error(io_error) =>
1996 0 : {
1997 0 : &self.parent.client_error
1998 : }
1999 0 : Err(_) => &self.parent.error,
2000 : };
2001 0 : metric.observe(elapsed);
2002 0 : }
2003 : }
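// A minimal usage sketch (`send_basebackup()` is a hypothetical handler that
// returns `Result<_, QueryError>`): the recording brackets the whole query and
// classifies the outcome into the ok/error/client_error histograms:
//
//     let recording = BASEBACKUP_QUERY_TIME.start_recording();
//     let res = send_basebackup().await;
//     recording.observe(&res);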
2004 :
2005 0 : pub(crate) static LIVE_CONNECTIONS: Lazy<IntCounterPairVec> = Lazy::new(|| {
2006 0 : register_int_counter_pair_vec!(
2007 0 : "pageserver_live_connections_started",
2008 0 : "Number of network connections that we started handling",
2009 0 : "pageserver_live_connections_finished",
2010 0 : "Number of network connections that we finished handling",
2011 0 : &["pageserver_connection_kind"]
2012 0 : )
2013 0 : .expect("failed to define a metric")
2014 0 : });
2015 :
2016 : #[derive(Clone, Copy, enum_map::Enum, IntoStaticStr)]
2017 : pub(crate) enum ComputeCommandKind {
2018 : PageStreamV3,
2019 : PageStreamV2,
2020 : Basebackup,
2021 : Fullbackup,
2022 : LeaseLsn,
2023 : }
2024 :
2025 : pub(crate) struct ComputeCommandCounters {
2026 : map: EnumMap<ComputeCommandKind, IntCounter>,
2027 : }
2028 :
2029 0 : pub(crate) static COMPUTE_COMMANDS_COUNTERS: Lazy<ComputeCommandCounters> = Lazy::new(|| {
2030 0 : let inner = register_int_counter_vec!(
2031 0 : "pageserver_compute_commands",
2032 0 : "Number of compute -> pageserver commands processed",
2033 0 : &["command"]
2034 0 : )
2035 0 : .expect("failed to define a metric");
2036 0 :
2037 0 : ComputeCommandCounters {
2038 0 : map: EnumMap::from_array(std::array::from_fn(|i| {
2039 0 : let command = ComputeCommandKind::from_usize(i);
2040 0 : let command_str: &'static str = command.into();
2041 0 : inner.with_label_values(&[command_str])
2042 0 : })),
2043 0 : }
2044 0 : });
2045 :
2046 : impl ComputeCommandCounters {
2047 0 : pub(crate) fn for_command(&self, command: ComputeCommandKind) -> &IntCounter {
2048 0 : &self.map[command]
2049 0 : }
2050 : }
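// A minimal usage sketch, assuming a hypothetical command dispatch site:
//
//     COMPUTE_COMMANDS_COUNTERS
//         .for_command(ComputeCommandKind::Basebackup)
//         .inc();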
2051 :
2052 : // remote storage metrics
2053 :
2054 408 : static REMOTE_TIMELINE_CLIENT_CALLS: Lazy<IntCounterPairVec> = Lazy::new(|| {
2055 408 : register_int_counter_pair_vec!(
2056 408 : "pageserver_remote_timeline_client_calls_started",
2057 408 : "Number of started calls to remote timeline client.",
2058 408 : "pageserver_remote_timeline_client_calls_finished",
2059 408 : "Number of finshed calls to remote timeline client.",
2060 408 : &[
2061 408 : "tenant_id",
2062 408 : "shard_id",
2063 408 : "timeline_id",
2064 408 : "file_kind",
2065 408 : "op_kind"
2066 408 : ],
2067 408 : )
2068 408 : .unwrap()
2069 408 : });
2070 :
2071 : static REMOTE_TIMELINE_CLIENT_BYTES_STARTED_COUNTER: Lazy<IntCounterVec> =
2072 404 : Lazy::new(|| {
2073 404 : register_int_counter_vec!(
2074 404 : "pageserver_remote_timeline_client_bytes_started",
2075 404 : "Incremented by the number of bytes associated with a remote timeline client operation. \
2076 404 : The increment happens when the operation is scheduled.",
2077 404 : &["tenant_id", "shard_id", "timeline_id", "file_kind", "op_kind"],
2078 404 : )
2079 404 : .expect("failed to define a metric")
2080 404 : });
2081 :
2082 404 : static REMOTE_TIMELINE_CLIENT_BYTES_FINISHED_COUNTER: Lazy<IntCounterVec> = Lazy::new(|| {
2083 404 : register_int_counter_vec!(
2084 404 : "pageserver_remote_timeline_client_bytes_finished",
2085 404 : "Incremented by the number of bytes associated with a remote timeline client operation. \
2086 404 : The increment happens when the operation finishes (regardless of success/failure/shutdown).",
2087 404 : &["tenant_id", "shard_id", "timeline_id", "file_kind", "op_kind"],
2088 404 : )
2089 404 : .expect("failed to define a metric")
2090 404 : });
2091 :
2092 : pub(crate) struct TenantManagerMetrics {
2093 : tenant_slots_attached: UIntGauge,
2094 : tenant_slots_secondary: UIntGauge,
2095 : tenant_slots_inprogress: UIntGauge,
2096 : pub(crate) tenant_slot_writes: IntCounter,
2097 : pub(crate) unexpected_errors: IntCounter,
2098 : }
2099 :
2100 : impl TenantManagerMetrics {
2101 : /// Helpers for tracking slots. Note that these do not track the lifetime of TenantSlot objects
2102 : /// exactly: they track the lifetime of the slots _in the tenant map_.
2103 4 : pub(crate) fn slot_inserted(&self, slot: &TenantSlot) {
2104 4 : match slot {
2105 0 : TenantSlot::Attached(_) => {
2106 0 : self.tenant_slots_attached.inc();
2107 0 : }
2108 0 : TenantSlot::Secondary(_) => {
2109 0 : self.tenant_slots_secondary.inc();
2110 0 : }
2111 4 : TenantSlot::InProgress(_) => {
2112 4 : self.tenant_slots_inprogress.inc();
2113 4 : }
2114 : }
2115 4 : }
2116 :
2117 4 : pub(crate) fn slot_removed(&self, slot: &TenantSlot) {
2118 4 : match slot {
2119 4 : TenantSlot::Attached(_) => {
2120 4 : self.tenant_slots_attached.dec();
2121 4 : }
2122 0 : TenantSlot::Secondary(_) => {
2123 0 : self.tenant_slots_secondary.dec();
2124 0 : }
2125 0 : TenantSlot::InProgress(_) => {
2126 0 : self.tenant_slots_inprogress.dec();
2127 0 : }
2128 : }
2129 4 : }
2130 :
2131 : #[cfg(all(debug_assertions, not(test)))]
2132 0 : pub(crate) fn slots_total(&self) -> u64 {
2133 0 : self.tenant_slots_attached.get()
2134 0 : + self.tenant_slots_secondary.get()
2135 0 : + self.tenant_slots_inprogress.get()
2136 0 : }
2137 : }
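// A minimal usage sketch, assuming the tenant map calls these helpers on every
// slot mutation so that the per-mode gauges stay balanced:
//
//     TENANT_MANAGER.slot_inserted(&new_slot);
//     // ... later, when the slot is taken out of the map again ...
//     TENANT_MANAGER.slot_removed(&old_slot);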
2138 :
2139 4 : pub(crate) static TENANT_MANAGER: Lazy<TenantManagerMetrics> = Lazy::new(|| {
2140 4 : let tenant_slots = register_uint_gauge_vec!(
2141 4 : "pageserver_tenant_manager_slots",
2142 4 : "How many slots currently exist, including all attached, secondary and in-progress operations",
2143 4 : &["mode"]
2144 4 : )
2145 4 : .expect("failed to define a metric");
2146 4 : TenantManagerMetrics {
2147 4 : tenant_slots_attached: tenant_slots
2148 4 : .get_metric_with_label_values(&["attached"])
2149 4 : .unwrap(),
2150 4 : tenant_slots_secondary: tenant_slots
2151 4 : .get_metric_with_label_values(&["secondary"])
2152 4 : .unwrap(),
2153 4 : tenant_slots_inprogress: tenant_slots
2154 4 : .get_metric_with_label_values(&["inprogress"])
2155 4 : .unwrap(),
2156 4 : tenant_slot_writes: register_int_counter!(
2157 4 : "pageserver_tenant_manager_slot_writes",
2158 4 : "Writes to a tenant slot, including all of create/attach/detach/delete"
2159 4 : )
2160 4 : .expect("failed to define a metric"),
2161 4 : unexpected_errors: register_int_counter!(
2162 4 : "pageserver_tenant_manager_unexpected_errors_total",
2163 4 : "Number of unexpected conditions encountered: nonzero value indicates a non-fatal bug."
2164 4 : )
2165 4 : .expect("failed to define a metric"),
2166 4 : }
2167 4 : });
2168 :
2169 : pub(crate) struct DeletionQueueMetrics {
2170 : pub(crate) keys_submitted: IntCounter,
2171 : pub(crate) keys_dropped: IntCounter,
2172 : pub(crate) keys_executed: IntCounter,
2173 : pub(crate) keys_validated: IntCounter,
2174 : pub(crate) dropped_lsn_updates: IntCounter,
2175 : pub(crate) unexpected_errors: IntCounter,
2176 : pub(crate) remote_errors: IntCounterVec,
2177 : }
2178 69 : pub(crate) static DELETION_QUEUE: Lazy<DeletionQueueMetrics> = Lazy::new(|| {
2179 69 : DeletionQueueMetrics{
2180 69 :
2181 69 : keys_submitted: register_int_counter!(
2182 69 : "pageserver_deletion_queue_submitted_total",
2183 69 : "Number of objects submitted for deletion"
2184 69 : )
2185 69 : .expect("failed to define a metric"),
2186 69 :
2187 69 : keys_dropped: register_int_counter!(
2188 69 : "pageserver_deletion_queue_dropped_total",
2189 69 : "Number of object deletions dropped due to stale generation."
2190 69 : )
2191 69 : .expect("failed to define a metric"),
2192 69 :
2193 69 : keys_executed: register_int_counter!(
2194 69 : "pageserver_deletion_queue_executed_total",
2195 69 : "Number of objects deleted. Only includes objects that we actually deleted, sum with pageserver_deletion_queue_dropped_total for the total number of keys processed to completion"
2196 69 : )
2197 69 : .expect("failed to define a metric"),
2198 69 :
2199 69 : keys_validated: register_int_counter!(
2200 69 : "pageserver_deletion_queue_validated_total",
2201 69 : "Number of keys validated for deletion. Sum with pageserver_deletion_queue_dropped_total for the total number of keys that have passed through the validation stage."
2202 69 : )
2203 69 : .expect("failed to define a metric"),
2204 69 :
2205 69 : dropped_lsn_updates: register_int_counter!(
2206 69 : "pageserver_deletion_queue_dropped_lsn_updates_total",
2207 69 : "Updates to remote_consistent_lsn dropped due to stale generation number."
2208 69 : )
2209 69 : .expect("failed to define a metric"),
2210 69 : unexpected_errors: register_int_counter!(
2211 69 : "pageserver_deletion_queue_unexpected_errors_total",
2212 69 : "Number of unexpected condiions that may stall the queue: any value above zero is unexpected."
2213 69 : )
2214 69 : .expect("failed to define a metric"),
2215 69 : remote_errors: register_int_counter_vec!(
2216 69 : "pageserver_deletion_queue_remote_errors_total",
2217 69 : "Retryable remote I/O errors while executing deletions, for example 503 responses to DeleteObjects",
2218 69 : &["op_kind"],
2219 69 : )
2220 69 : .expect("failed to define a metric")
2221 69 : }
2222 69 : });
2223 :
2224 : pub(crate) struct SecondaryModeMetrics {
2225 : pub(crate) upload_heatmap: IntCounter,
2226 : pub(crate) upload_heatmap_errors: IntCounter,
2227 : pub(crate) upload_heatmap_duration: Histogram,
2228 : pub(crate) download_heatmap: IntCounter,
2229 : pub(crate) download_layer: IntCounter,
2230 : }
2231 0 : pub(crate) static SECONDARY_MODE: Lazy<SecondaryModeMetrics> = Lazy::new(|| {
2232 0 : SecondaryModeMetrics {
2233 0 : upload_heatmap: register_int_counter!(
2234 0 : "pageserver_secondary_upload_heatmap",
2235 0 : "Number of heatmaps written to remote storage by attached tenants"
2236 0 : )
2237 0 : .expect("failed to define a metric"),
2238 0 : upload_heatmap_errors: register_int_counter!(
2239 0 : "pageserver_secondary_upload_heatmap_errors",
2240 0 : "Failures writing heatmap to remote storage"
2241 0 : )
2242 0 : .expect("failed to define a metric"),
2243 0 : upload_heatmap_duration: register_histogram!(
2244 0 : "pageserver_secondary_upload_heatmap_duration",
2245 0 : "Time to build and upload a heatmap, including any waiting inside the remote storage client"
2246 0 : )
2247 0 : .expect("failed to define a metric"),
2248 0 : download_heatmap: register_int_counter!(
2249 0 : "pageserver_secondary_download_heatmap",
2250 0 : "Number of downloads of heatmaps by secondary mode locations, including when it hasn't changed"
2251 0 : )
2252 0 : .expect("failed to define a metric"),
2253 0 : download_layer: register_int_counter!(
2254 0 : "pageserver_secondary_download_layer",
2255 0 : "Number of downloads of layers by secondary mode locations"
2256 0 : )
2257 0 : .expect("failed to define a metric"),
2258 0 : }
2259 0 : });
2260 :
2261 0 : pub(crate) static SECONDARY_RESIDENT_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
2262 0 : register_uint_gauge_vec!(
2263 0 : "pageserver_secondary_resident_physical_size",
2264 0 : "The size of the layer files present in the pageserver's filesystem, for secondary locations.",
2265 0 : &["tenant_id", "shard_id"]
2266 0 : )
2267 0 : .expect("failed to define a metric")
2268 0 : });
2269 :
2270 0 : pub(crate) static NODE_UTILIZATION_SCORE: Lazy<UIntGauge> = Lazy::new(|| {
2271 0 : register_uint_gauge!(
2272 0 : "pageserver_utilization_score",
2273 0 : "The utilization score we report to the storage controller for scheduling, where 0 is empty, 1000000 is full, and anything above is considered overloaded",
2274 0 : )
2275 0 : .expect("failed to define a metric")
2276 0 : });
2277 :
2278 0 : pub(crate) static SECONDARY_HEATMAP_TOTAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
2279 0 : register_uint_gauge_vec!(
2280 0 : "pageserver_secondary_heatmap_total_size",
2281 0 : "The total size in bytes of all layers in the most recently downloaded heatmap.",
2282 0 : &["tenant_id", "shard_id"]
2283 0 : )
2284 0 : .expect("failed to define a metric")
2285 0 : });
2286 :
2287 : #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
2288 : pub enum RemoteOpKind {
2289 : Upload,
2290 : Download,
2291 : Delete,
2292 : }
2293 : impl RemoteOpKind {
2294 30561 : pub fn as_str(&self) -> &'static str {
2295 30561 : match self {
2296 28779 : Self::Upload => "upload",
2297 136 : Self::Download => "download",
2298 1646 : Self::Delete => "delete",
2299 : }
2300 30561 : }
2301 : }
2302 :
2303 : #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
2304 : pub enum RemoteOpFileKind {
2305 : Layer,
2306 : Index,
2307 : }
2308 : impl RemoteOpFileKind {
2309 30561 : pub fn as_str(&self) -> &'static str {
2310 30561 : match self {
2311 21424 : Self::Layer => "layer",
2312 9137 : Self::Index => "index",
2313 : }
2314 30561 : }
2315 : }
2316 :
2317 401 : pub(crate) static REMOTE_OPERATION_TIME: Lazy<HistogramVec> = Lazy::new(|| {
2318 401 : register_histogram_vec!(
2319 401 : "pageserver_remote_operation_seconds",
2320 401 : "Time spent on remote storage operations. \
2321 401 : Grouped by tenant, timeline, operation_kind and status. \
2322 401 : Does not account for time spent waiting in remote timeline client's queues.",
2323 401 : &["file_kind", "op_kind", "status"]
2324 401 : )
2325 401 : .expect("failed to define a metric")
2326 401 : });
2327 :
2328 0 : pub(crate) static TENANT_TASK_EVENTS: Lazy<IntCounterVec> = Lazy::new(|| {
2329 0 : register_int_counter_vec!(
2330 0 : "pageserver_tenant_task_events",
2331 0 : "Number of task start/stop/fail events.",
2332 0 : &["event"],
2333 0 : )
2334 0 : .expect("Failed to register tenant_task_events metric")
2335 0 : });
2336 :
2337 : pub struct BackgroundLoopSemaphoreMetrics {
2338 : counters: EnumMap<BackgroundLoopKind, IntCounterPair>,
2339 : durations: EnumMap<BackgroundLoopKind, Histogram>,
2340 : waiting_tasks: EnumMap<BackgroundLoopKind, IntGauge>,
2341 : running_tasks: EnumMap<BackgroundLoopKind, IntGauge>,
2342 : }
2343 :
2344 : pub(crate) static BACKGROUND_LOOP_SEMAPHORE: Lazy<BackgroundLoopSemaphoreMetrics> =
2345 40 : Lazy::new(|| {
2346 40 : let counters = register_int_counter_pair_vec!(
2347 40 : "pageserver_background_loop_semaphore_wait_start_count",
2348 40 : "Counter for background loop concurrency-limiting semaphore acquire calls started",
2349 40 : "pageserver_background_loop_semaphore_wait_finish_count",
2350 40 : "Counter for background loop concurrency-limiting semaphore acquire calls finished",
2351 40 : &["task"],
2352 40 : )
2353 40 : .unwrap();
2354 40 :
2355 40 : let durations = register_histogram_vec!(
2356 40 : "pageserver_background_loop_semaphore_wait_seconds",
2357 40 : "Seconds spent waiting on background loop semaphore acquisition",
2358 40 : &["task"],
2359 40 : vec![0.01, 1.0, 5.0, 10.0, 30.0, 60.0, 180.0, 300.0, 600.0],
2360 40 : )
2361 40 : .unwrap();
2362 40 :
2363 40 : let waiting_tasks = register_int_gauge_vec!(
2364 40 : "pageserver_background_loop_semaphore_waiting_tasks",
2365 40 : "Number of background loop tasks waiting for semaphore",
2366 40 : &["task"],
2367 40 : )
2368 40 : .unwrap();
2369 40 :
2370 40 : let running_tasks = register_int_gauge_vec!(
2371 40 : "pageserver_background_loop_semaphore_running_tasks",
2372 40 : "Number of background loop tasks running concurrently",
2373 40 : &["task"],
2374 40 : )
2375 40 : .unwrap();
2376 40 :
2377 40 : BackgroundLoopSemaphoreMetrics {
2378 400 : counters: EnumMap::from_array(std::array::from_fn(|i| {
2379 400 : let kind = BackgroundLoopKind::from_usize(i);
2380 400 : counters.with_label_values(&[kind.into()])
2381 400 : })),
2382 400 : durations: EnumMap::from_array(std::array::from_fn(|i| {
2383 400 : let kind = BackgroundLoopKind::from_usize(i);
2384 400 : durations.with_label_values(&[kind.into()])
2385 400 : })),
2386 400 : waiting_tasks: EnumMap::from_array(std::array::from_fn(|i| {
2387 400 : let kind = BackgroundLoopKind::from_usize(i);
2388 400 : waiting_tasks.with_label_values(&[kind.into()])
2389 400 : })),
2390 400 : running_tasks: EnumMap::from_array(std::array::from_fn(|i| {
2391 400 : let kind = BackgroundLoopKind::from_usize(i);
2392 400 : running_tasks.with_label_values(&[kind.into()])
2393 400 : })),
2394 40 : }
2395 40 : });
2396 :
2397 : impl BackgroundLoopSemaphoreMetrics {
2398 : /// Starts recording semaphore metrics. Call `acquired()` on the returned recorder when the
2399 : /// semaphore is acquired, and drop it when the task completes or is cancelled.
2400 725 : pub(crate) fn record(
2401 725 : &self,
2402 725 : task: BackgroundLoopKind,
2403 725 : ) -> BackgroundLoopSemaphoreMetricsRecorder {
2404 725 : BackgroundLoopSemaphoreMetricsRecorder::start(self, task)
2405 725 : }
2406 : }
2407 :
2408 : /// Records metrics for a background task.
2409 : pub struct BackgroundLoopSemaphoreMetricsRecorder<'a> {
2410 : metrics: &'a BackgroundLoopSemaphoreMetrics,
2411 : task: BackgroundLoopKind,
2412 : start: Instant,
2413 : wait_counter_guard: Option<metrics::IntCounterPairGuard>,
2414 : }
2415 :
2416 : impl<'a> BackgroundLoopSemaphoreMetricsRecorder<'a> {
2417 : /// Starts recording semaphore metrics, by recording wait time and incrementing
2418 : /// `wait_start_count` and `waiting_tasks`.
2419 725 : fn start(metrics: &'a BackgroundLoopSemaphoreMetrics, task: BackgroundLoopKind) -> Self {
2420 725 : metrics.waiting_tasks[task].inc();
2421 725 : Self {
2422 725 : metrics,
2423 725 : task,
2424 725 : start: Instant::now(),
2425 725 : wait_counter_guard: Some(metrics.counters[task].guard()),
2426 725 : }
2427 725 : }
2428 :
2429 : /// Signals that the semaphore has been acquired, and updates relevant metrics.
2430 725 : pub fn acquired(&mut self) -> Duration {
2431 725 : let waited = self.start.elapsed();
2432 725 : self.wait_counter_guard.take().expect("already acquired");
2433 725 : self.metrics.durations[self.task].observe(waited.as_secs_f64());
2434 725 : self.metrics.waiting_tasks[self.task].dec();
2435 725 : self.metrics.running_tasks[self.task].inc();
2436 725 : waited
2437 725 : }
2438 : }
2439 :
2440 : impl Drop for BackgroundLoopSemaphoreMetricsRecorder<'_> {
2441 : /// The task either completed or was cancelled.
2442 725 : fn drop(&mut self) {
2443 725 : if self.wait_counter_guard.take().is_some() {
2444 0 : // Waiting.
2445 0 : self.metrics.durations[self.task].observe(self.start.elapsed().as_secs_f64());
2446 0 : self.metrics.waiting_tasks[self.task].dec();
2447 725 : } else {
2448 725 : // Running.
2449 725 : self.metrics.running_tasks[self.task].dec();
2450 725 : }
2451 725 : }
2452 : }
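// A minimal usage sketch (the semaphore and `kind` are assumptions about the
// caller): the recorder brackets both the wait and the run phase of a
// background task:
//
//     let mut recorder = BACKGROUND_LOOP_SEMAPHORE.record(kind);
//     let _permit = semaphore.acquire().await.expect("semaphore closed");
//     let _waited = recorder.acquired();
//     // ... run the task; dropping `recorder` afterwards decrements running_tasks ...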
2453 :
2454 0 : pub(crate) static BACKGROUND_LOOP_PERIOD_OVERRUN_COUNT: Lazy<IntCounterVec> = Lazy::new(|| {
2455 0 : register_int_counter_vec!(
2456 0 : "pageserver_background_loop_period_overrun_count",
2457 0 : "Incremented whenever warn_when_period_overrun() logs a warning.",
2458 0 : &["task", "period"],
2459 0 : )
2460 0 : .expect("failed to define a metric")
2461 0 : });
2462 :
2463 : // walreceiver metrics
2464 :
2465 0 : pub(crate) static WALRECEIVER_STARTED_CONNECTIONS: Lazy<IntCounter> = Lazy::new(|| {
2466 0 : register_int_counter!(
2467 0 : "pageserver_walreceiver_started_connections_total",
2468 0 : "Number of started walreceiver connections"
2469 0 : )
2470 0 : .expect("failed to define a metric")
2471 0 : });
2472 :
2473 0 : pub(crate) static WALRECEIVER_ACTIVE_MANAGERS: Lazy<IntGauge> = Lazy::new(|| {
2474 0 : register_int_gauge!(
2475 0 : "pageserver_walreceiver_active_managers",
2476 0 : "Number of active walreceiver managers"
2477 0 : )
2478 0 : .expect("failed to define a metric")
2479 0 : });
2480 :
2481 0 : pub(crate) static WALRECEIVER_SWITCHES: Lazy<IntCounterVec> = Lazy::new(|| {
2482 0 : register_int_counter_vec!(
2483 0 : "pageserver_walreceiver_switches_total",
2484 0 : "Number of walreceiver manager change_connection calls",
2485 0 : &["reason"]
2486 0 : )
2487 0 : .expect("failed to define a metric")
2488 0 : });
2489 :
2490 0 : pub(crate) static WALRECEIVER_BROKER_UPDATES: Lazy<IntCounter> = Lazy::new(|| {
2491 0 : register_int_counter!(
2492 0 : "pageserver_walreceiver_broker_updates_total",
2493 0 : "Number of received broker updates in walreceiver"
2494 0 : )
2495 0 : .expect("failed to define a metric")
2496 0 : });
2497 :
2498 4 : pub(crate) static WALRECEIVER_CANDIDATES_EVENTS: Lazy<IntCounterVec> = Lazy::new(|| {
2499 4 : register_int_counter_vec!(
2500 4 : "pageserver_walreceiver_candidates_events_total",
2501 4 : "Number of walreceiver candidate events",
2502 4 : &["event"]
2503 4 : )
2504 4 : .expect("failed to define a metric")
2505 4 : });
2506 :
2507 : pub(crate) static WALRECEIVER_CANDIDATES_ADDED: Lazy<IntCounter> =
2508 0 : Lazy::new(|| WALRECEIVER_CANDIDATES_EVENTS.with_label_values(&["add"]));
2509 :
2510 : pub(crate) static WALRECEIVER_CANDIDATES_REMOVED: Lazy<IntCounter> =
2511 4 : Lazy::new(|| WALRECEIVER_CANDIDATES_EVENTS.with_label_values(&["remove"]));
2512 :
2513 : // Metrics collected on WAL redo operations
2514 : //
2515 : // We collect the time spent in actual WAL redo ('redo'), and time waiting
2516 : // for access to the postgres process ('wait') since there is only one for
2517 : // each tenant.
2518 :
2519 : /// Time buckets are small because we want to be able to measure the
2520 : /// smallest redo processing times. These buckets allow us to measure down
2521 : /// to 5us, which equates to 200'000 pages/sec, which equates to 1.6GB/sec.
2522 : /// This is much better than the previous 5ms aka 200 pages/sec aka 1.6MB/sec.
2523 : ///
2524 : /// Values up to 1s are recorded because metrics show that we have redo
2525 : /// durations and lock times larger than 0.250s.
2526 : macro_rules! redo_histogram_time_buckets {
2527 : () => {
2528 : vec![
2529 : 0.000_005, 0.000_010, 0.000_025, 0.000_050, 0.000_100, 0.000_250, 0.000_500, 0.001_000,
2530 : 0.002_500, 0.005_000, 0.010_000, 0.025_000, 0.050_000, 0.100_000, 0.250_000, 0.500_000,
2531 : 1.000_000,
2532 : ]
2533 : };
2534 : }
2535 :
2536 : /// While we're at it, also measure the amount of records replayed in each
2537 : /// operation. We have a global 'total replayed' counter, but that's not
2538 : /// as useful as 'what is the skew for how many records we replay in one
2539 : /// operation'.
2540 : macro_rules! redo_histogram_count_buckets {
2541 : () => {
2542 : vec![0.0, 1.0, 2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0]
2543 : };
2544 : }
2545 :
2546 : macro_rules! redo_bytes_histogram_count_buckets {
2547 : () => {
2548 : // powers of (2^.5), from 2^4.5 to 2^15 (22 buckets)
2549 : // rounded up to the next multiple of 8 to capture any MAXALIGNed record of that size, too.
2550 : vec![
2551 : 24.0, 32.0, 48.0, 64.0, 96.0, 128.0, 184.0, 256.0, 368.0, 512.0, 728.0, 1024.0, 1456.0,
2552 : 2048.0, 2904.0, 4096.0, 5800.0, 8192.0, 11592.0, 16384.0, 23176.0, 32768.0,
2553 : ]
2554 : };
2555 : }
2556 :
2557 : pub(crate) struct WalIngestMetrics {
2558 : pub(crate) bytes_received: IntCounter,
2559 : pub(crate) records_received: IntCounter,
2560 : pub(crate) records_observed: IntCounter,
2561 : pub(crate) records_committed: IntCounter,
2562 : pub(crate) records_filtered: IntCounter,
2563 : pub(crate) values_committed_metadata_images: IntCounter,
2564 : pub(crate) values_committed_metadata_deltas: IntCounter,
2565 : pub(crate) values_committed_data_images: IntCounter,
2566 : pub(crate) values_committed_data_deltas: IntCounter,
2567 : pub(crate) gap_blocks_zeroed_on_rel_extend: IntCounter,
2568 : }
2569 :
2570 : impl WalIngestMetrics {
2571 0 : pub(crate) fn inc_values_committed(&self, stats: &DatadirModificationStats) {
2572 0 : if stats.metadata_images > 0 {
2573 0 : self.values_committed_metadata_images
2574 0 : .inc_by(stats.metadata_images);
2575 0 : }
2576 0 : if stats.metadata_deltas > 0 {
2577 0 : self.values_committed_metadata_deltas
2578 0 : .inc_by(stats.metadata_deltas);
2579 0 : }
2580 0 : if stats.data_images > 0 {
2581 0 : self.values_committed_data_images.inc_by(stats.data_images);
2582 0 : }
2583 0 : if stats.data_deltas > 0 {
2584 0 : self.values_committed_data_deltas.inc_by(stats.data_deltas);
2585 0 : }
2586 0 : }
2587 : }
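// A minimal usage sketch, assuming `stats` is the `DatadirModificationStats`
// produced by committing a datadir modification in the ingest path:
//
//     WAL_INGEST.inc_values_committed(&stats);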
2588 :
2589 20 : pub(crate) static WAL_INGEST: Lazy<WalIngestMetrics> = Lazy::new(|| {
2590 20 : let values_committed = register_int_counter_vec!(
2591 20 : "pageserver_wal_ingest_values_committed",
2592 20 : "Number of values committed to pageserver storage from WAL records",
2593 20 : &["class", "kind"],
2594 20 : )
2595 20 : .expect("failed to define a metric");
2596 20 :
2597 20 : WalIngestMetrics {
2598 20 : bytes_received: register_int_counter!(
2599 20 : "pageserver_wal_ingest_bytes_received",
2600 20 : "Bytes of WAL ingested from safekeepers",
2601 20 : )
2602 20 : .unwrap(),
2603 20 : records_received: register_int_counter!(
2604 20 : "pageserver_wal_ingest_records_received",
2605 20 : "Number of WAL records received from safekeepers"
2606 20 : )
2607 20 : .expect("failed to define a metric"),
2608 20 : records_observed: register_int_counter!(
2609 20 : "pageserver_wal_ingest_records_observed",
2610 20 : "Number of WAL records observed from safekeepers. These are metadata only records for shard 0."
2611 20 : )
2612 20 : .expect("failed to define a metric"),
2613 20 : records_committed: register_int_counter!(
2614 20 : "pageserver_wal_ingest_records_committed",
2615 20 : "Number of WAL records which resulted in writes to pageserver storage"
2616 20 : )
2617 20 : .expect("failed to define a metric"),
2618 20 : records_filtered: register_int_counter!(
2619 20 : "pageserver_wal_ingest_records_filtered",
2620 20 : "Number of WAL records filtered out due to sharding"
2621 20 : )
2622 20 : .expect("failed to define a metric"),
2623 20 : values_committed_metadata_images: values_committed.with_label_values(&["metadata", "image"]),
2624 20 : values_committed_metadata_deltas: values_committed.with_label_values(&["metadata", "delta"]),
2625 20 : values_committed_data_images: values_committed.with_label_values(&["data", "image"]),
2626 20 : values_committed_data_deltas: values_committed.with_label_values(&["data", "delta"]),
2627 20 : gap_blocks_zeroed_on_rel_extend: register_int_counter!(
2628 20 : "pageserver_gap_blocks_zeroed_on_rel_extend",
2629 20 : "Total number of zero gap blocks written on relation extends"
2630 20 : )
2631 20 : .expect("failed to define a metric"),
2632 20 : }
2633 20 : });
2634 :
2635 416 : pub(crate) static PAGESERVER_TIMELINE_WAL_RECORDS_RECEIVED: Lazy<IntCounterVec> = Lazy::new(|| {
2636 416 : register_int_counter_vec!(
2637 416 : "pageserver_timeline_wal_records_received",
2638 416 : "Number of WAL records received per shard",
2639 416 : &["tenant_id", "shard_id", "timeline_id"]
2640 416 : )
2641 416 : .expect("failed to define a metric")
2642 416 : });
2643 :
2644 12 : pub(crate) static WAL_REDO_TIME: Lazy<Histogram> = Lazy::new(|| {
2645 12 : register_histogram!(
2646 12 : "pageserver_wal_redo_seconds",
2647 12 : "Time spent on WAL redo",
2648 12 : redo_histogram_time_buckets!()
2649 12 : )
2650 12 : .expect("failed to define a metric")
2651 12 : });
2652 :
2653 12 : pub(crate) static WAL_REDO_RECORDS_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
2654 12 : register_histogram!(
2655 12 : "pageserver_wal_redo_records_histogram",
2656 12 : "Histogram of number of records replayed per redo in the Postgres WAL redo process",
2657 12 : redo_histogram_count_buckets!(),
2658 12 : )
2659 12 : .expect("failed to define a metric")
2660 12 : });
2661 :
2662 12 : pub(crate) static WAL_REDO_BYTES_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
2663 12 : register_histogram!(
2664 12 : "pageserver_wal_redo_bytes_histogram",
2665 12 : "Histogram of number of records replayed per redo sent to Postgres",
2666 12 : redo_bytes_histogram_count_buckets!(),
2667 12 : )
2668 12 : .expect("failed to define a metric")
2669 12 : });
2670 :
2671 : // FIXME: isn't this already included by WAL_REDO_RECORDS_HISTOGRAM which has _count?
2672 12 : pub(crate) static WAL_REDO_RECORD_COUNTER: Lazy<IntCounter> = Lazy::new(|| {
2673 12 : register_int_counter!(
2674 12 : "pageserver_replayed_wal_records_total",
2675 12 : "Number of WAL records replayed in WAL redo process"
2676 12 : )
2677 12 : .unwrap()
2678 12 : });
2679 :
2680 : #[rustfmt::skip]
2681 16 : pub(crate) static WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
2682 16 : register_histogram!(
2683 16 : "pageserver_wal_redo_process_launch_duration",
2684 16 : "Histogram of the duration of successful WalRedoProcess::launch calls",
2685 16 : vec![
2686 16 : 0.0002, 0.0004, 0.0006, 0.0008, 0.0010,
2687 16 : 0.0020, 0.0040, 0.0060, 0.0080, 0.0100,
2688 16 : 0.0200, 0.0400, 0.0600, 0.0800, 0.1000,
2689 16 : 0.2000, 0.4000, 0.6000, 0.8000, 1.0000,
2690 16 : 1.5000, 2.0000, 2.5000, 3.0000, 4.0000, 10.0000
2691 16 : ],
2692 16 : )
2693 16 : .expect("failed to define a metric")
2694 16 : });
2695 :
2696 : pub(crate) struct WalRedoProcessCounters {
2697 : pub(crate) started: IntCounter,
2698 : pub(crate) killed_by_cause: EnumMap<WalRedoKillCause, IntCounter>,
2699 : pub(crate) active_stderr_logger_tasks_started: IntCounter,
2700 : pub(crate) active_stderr_logger_tasks_finished: IntCounter,
2701 : }
2702 :
2703 : #[derive(Debug, enum_map::Enum, strum_macros::IntoStaticStr)]
2704 : pub(crate) enum WalRedoKillCause {
2705 : WalRedoProcessDrop,
2706 : NoLeakChildDrop,
2707 : Startup,
2708 : }
2709 :
2710 : impl Default for WalRedoProcessCounters {
2711 16 : fn default() -> Self {
2712 16 : let started = register_int_counter!(
2713 16 : "pageserver_wal_redo_process_started_total",
2714 16 : "Number of WAL redo processes started",
2715 16 : )
2716 16 : .unwrap();
2717 16 :
2718 16 : let killed = register_int_counter_vec!(
2719 16 : "pageserver_wal_redo_process_stopped_total",
2720 16 : "Number of WAL redo processes stopped",
2721 16 : &["cause"],
2722 16 : )
2723 16 : .unwrap();
2724 16 :
2725 16 : let active_stderr_logger_tasks_started = register_int_counter!(
2726 16 : "pageserver_walredo_stderr_logger_tasks_started_total",
2727 16 : "Number of active walredo stderr logger tasks that have started",
2728 16 : )
2729 16 : .unwrap();
2730 16 :
2731 16 : let active_stderr_logger_tasks_finished = register_int_counter!(
2732 16 : "pageserver_walredo_stderr_logger_tasks_finished_total",
2733 16 : "Number of active walredo stderr logger tasks that have finished",
2734 16 : )
2735 16 : .unwrap();
2736 16 :
2737 16 : Self {
2738 16 : started,
2739 48 : killed_by_cause: EnumMap::from_array(std::array::from_fn(|i| {
2740 48 : let cause = WalRedoKillCause::from_usize(i);
2741 48 : let cause_str: &'static str = cause.into();
2742 48 : killed.with_label_values(&[cause_str])
2743 48 : })),
2744 16 : active_stderr_logger_tasks_started,
2745 16 : active_stderr_logger_tasks_finished,
2746 16 : }
2747 16 : }
2748 : }
2749 :
2750 : pub(crate) static WAL_REDO_PROCESS_COUNTERS: Lazy<WalRedoProcessCounters> =
2751 : Lazy::new(WalRedoProcessCounters::default);
2752 :
2753 : /// Similar to `prometheus::HistogramTimer` but does not record on drop.
2754 : pub(crate) struct StorageTimeMetricsTimer {
2755 : metrics: StorageTimeMetrics,
2756 : start: Instant,
2757 : }
2758 :
2759 : impl StorageTimeMetricsTimer {
2760 4322 : fn new(metrics: StorageTimeMetrics) -> Self {
2761 4322 : Self {
2762 4322 : metrics,
2763 4322 : start: Instant::now(),
2764 4322 : }
2765 4322 : }
2766 :
2767 : /// Returns the elapsed duration of the timer.
2768 4322 : pub fn elapsed(&self) -> Duration {
2769 4322 : self.start.elapsed()
2770 4322 : }
2771 :
2772 : /// Record the time from creation to now and return it.
2773 4322 : pub fn stop_and_record(self) -> Duration {
2774 4322 : let duration = self.elapsed();
2775 4322 : let seconds = duration.as_secs_f64();
2776 4322 : self.metrics.timeline_sum.inc_by(seconds);
2777 4322 : self.metrics.timeline_count.inc();
2778 4322 : self.metrics.global_histogram.observe(seconds);
2779 4322 : duration
2780 4322 : }
2781 :
2782 : /// Turns this timer into a timer, which will always record -- usually this means recording
2783 : /// regardless an early `?` path was taken in a function.
2784 40 : pub(crate) fn record_on_drop(self) -> AlwaysRecordingStorageTimeMetricsTimer {
2785 40 : AlwaysRecordingStorageTimeMetricsTimer(Some(self))
2786 40 : }
2787 : }
2788 :
2789 : pub(crate) struct AlwaysRecordingStorageTimeMetricsTimer(Option<StorageTimeMetricsTimer>);
2790 :
2791 : impl Drop for AlwaysRecordingStorageTimeMetricsTimer {
2792 40 : fn drop(&mut self) {
2793 40 : if let Some(inner) = self.0.take() {
2794 40 : inner.stop_and_record();
2795 40 : }
2796 40 : }
2797 : }
2798 :
2799 : impl AlwaysRecordingStorageTimeMetricsTimer {
2800 : /// Returns the elapsed duration of the timer.
2801 0 : pub fn elapsed(&self) -> Duration {
2802 0 : self.0.as_ref().expect("not dropped yet").elapsed()
2803 0 : }
2804 : }
2805 :
2806 : /// Timing facilities for a globally histogrammed metric, supplemented by a per-tenant and
2807 : /// per-timeline total sum and count.
2808 : #[derive(Clone, Debug)]
2809 : pub(crate) struct StorageTimeMetrics {
2810 : /// Sum of f64 seconds, per operation, tenant_id and timeline_id
2811 : timeline_sum: Counter,
2812 : /// Number of operations, per operation, tenant_id and timeline_id
2813 : timeline_count: IntCounter,
2814 : /// Global histogram having only the "operation" label.
2815 : global_histogram: Histogram,
2816 : }
2817 :
2818 : impl StorageTimeMetrics {
2819 8280 : pub fn new(
2820 8280 : operation: StorageTimeOperation,
2821 8280 : tenant_id: &str,
2822 8280 : shard_id: &str,
2823 8280 : timeline_id: &str,
2824 8280 : ) -> Self {
2825 8280 : let operation: &'static str = operation.into();
2826 8280 :
2827 8280 : let timeline_sum = STORAGE_TIME_SUM_PER_TIMELINE
2828 8280 : .get_metric_with_label_values(&[operation, tenant_id, shard_id, timeline_id])
2829 8280 : .unwrap();
2830 8280 : let timeline_count = STORAGE_TIME_COUNT_PER_TIMELINE
2831 8280 : .get_metric_with_label_values(&[operation, tenant_id, shard_id, timeline_id])
2832 8280 : .unwrap();
2833 8280 : let global_histogram = STORAGE_TIME_GLOBAL
2834 8280 : .get_metric_with_label_values(&[operation])
2835 8280 : .unwrap();
2836 8280 :
2837 8280 : StorageTimeMetrics {
2838 8280 : timeline_sum,
2839 8280 : timeline_count,
2840 8280 : global_histogram,
2841 8280 : }
2842 8280 : }
2843 :
2844 : /// Starts timing a new operation.
2845 : ///
2846 : /// Note: unlike `prometheus::HistogramTimer` the returned timer does not record on drop.
2847 4322 : pub fn start_timer(&self) -> StorageTimeMetricsTimer {
2848 4322 : StorageTimeMetricsTimer::new(self.clone())
2849 4322 : }
2850 : }
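// A minimal usage sketch, assuming a caller that holds the per-timeline
// `StorageTimeMetrics` (e.g. `TimelineMetrics::compact_time_histo` below):
//
//     let timer = metrics.compact_time_histo.start_timer();
//     // ... do the work ...
//     let _elapsed = timer.stop_and_record();
//
// or, when early returns should still be recorded:
//
//     let _guard = metrics.compact_time_histo.start_timer().record_on_drop();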
2851 :
2852 : pub(crate) struct TimelineMetrics {
2853 : tenant_id: String,
2854 : shard_id: String,
2855 : timeline_id: String,
2856 : pub flush_time_histo: StorageTimeMetrics,
2857 : pub flush_delay_histo: StorageTimeMetrics,
2858 : pub compact_time_histo: StorageTimeMetrics,
2859 : pub create_images_time_histo: StorageTimeMetrics,
2860 : pub logical_size_histo: StorageTimeMetrics,
2861 : pub imitate_logical_size_histo: StorageTimeMetrics,
2862 : pub load_layer_map_histo: StorageTimeMetrics,
2863 : pub garbage_collect_histo: StorageTimeMetrics,
2864 : pub find_gc_cutoffs_histo: StorageTimeMetrics,
2865 : pub last_record_lsn_gauge: IntGauge,
2866 : pub disk_consistent_lsn_gauge: IntGauge,
2867 : pub pitr_history_size: UIntGauge,
2868 : pub archival_size: UIntGauge,
2869 : pub layers_per_read: Histogram,
2870 : pub standby_horizon_gauge: IntGauge,
2871 : pub resident_physical_size_gauge: UIntGauge,
2872 : pub visible_physical_size_gauge: UIntGauge,
2873 : /// copy of LayeredTimeline.current_logical_size
2874 : pub current_logical_size_gauge: UIntGauge,
2875 : pub aux_file_size_gauge: IntGauge,
2876 : pub directory_entries_count_gauge: Lazy<UIntGauge, Box<dyn Send + Fn() -> UIntGauge>>,
2877 : pub evictions: IntCounter,
2878 : pub evictions_with_low_residence_duration: std::sync::RwLock<EvictionsWithLowResidenceDuration>,
2879 : /// Number of valid LSN leases.
2880 : pub valid_lsn_lease_count_gauge: UIntGauge,
2881 : pub wal_records_received: IntCounter,
2882 : pub storage_io_size: StorageIoSizeMetrics,
2883 : pub wait_lsn_in_progress_micros: GlobalAndPerTenantIntCounter,
2884 : pub wait_lsn_start_finish_counterpair: IntCounterPair,
2885 : shutdown: std::sync::atomic::AtomicBool,
2886 : }
2887 :
2888 : impl TimelineMetrics {
2889 920 : pub fn new(
2890 920 : tenant_shard_id: &TenantShardId,
2891 920 : timeline_id_raw: &TimelineId,
2892 920 : evictions_with_low_residence_duration_builder: EvictionsWithLowResidenceDurationBuilder,
2893 920 : ) -> Self {
2894 920 : let tenant_id = tenant_shard_id.tenant_id.to_string();
2895 920 : let shard_id = format!("{}", tenant_shard_id.shard_slug());
2896 920 : let timeline_id = timeline_id_raw.to_string();
2897 920 : let flush_time_histo = StorageTimeMetrics::new(
2898 920 : StorageTimeOperation::LayerFlush,
2899 920 : &tenant_id,
2900 920 : &shard_id,
2901 920 : &timeline_id,
2902 920 : );
2903 920 : let flush_delay_histo = StorageTimeMetrics::new(
2904 920 : StorageTimeOperation::LayerFlushDelay,
2905 920 : &tenant_id,
2906 920 : &shard_id,
2907 920 : &timeline_id,
2908 920 : );
2909 920 : let compact_time_histo = StorageTimeMetrics::new(
2910 920 : StorageTimeOperation::Compact,
2911 920 : &tenant_id,
2912 920 : &shard_id,
2913 920 : &timeline_id,
2914 920 : );
2915 920 : let create_images_time_histo = StorageTimeMetrics::new(
2916 920 : StorageTimeOperation::CreateImages,
2917 920 : &tenant_id,
2918 920 : &shard_id,
2919 920 : &timeline_id,
2920 920 : );
2921 920 : let logical_size_histo = StorageTimeMetrics::new(
2922 920 : StorageTimeOperation::LogicalSize,
2923 920 : &tenant_id,
2924 920 : &shard_id,
2925 920 : &timeline_id,
2926 920 : );
2927 920 : let imitate_logical_size_histo = StorageTimeMetrics::new(
2928 920 : StorageTimeOperation::ImitateLogicalSize,
2929 920 : &tenant_id,
2930 920 : &shard_id,
2931 920 : &timeline_id,
2932 920 : );
2933 920 : let load_layer_map_histo = StorageTimeMetrics::new(
2934 920 : StorageTimeOperation::LoadLayerMap,
2935 920 : &tenant_id,
2936 920 : &shard_id,
2937 920 : &timeline_id,
2938 920 : );
2939 920 : let garbage_collect_histo = StorageTimeMetrics::new(
2940 920 : StorageTimeOperation::Gc,
2941 920 : &tenant_id,
2942 920 : &shard_id,
2943 920 : &timeline_id,
2944 920 : );
2945 920 : let find_gc_cutoffs_histo = StorageTimeMetrics::new(
2946 920 : StorageTimeOperation::FindGcCutoffs,
2947 920 : &tenant_id,
2948 920 : &shard_id,
2949 920 : &timeline_id,
2950 920 : );
2951 920 : let last_record_lsn_gauge = LAST_RECORD_LSN
2952 920 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2953 920 : .unwrap();
2954 920 :
2955 920 : let disk_consistent_lsn_gauge = DISK_CONSISTENT_LSN
2956 920 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2957 920 : .unwrap();
2958 920 :
2959 920 : let pitr_history_size = PITR_HISTORY_SIZE
2960 920 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2961 920 : .unwrap();
2962 920 :
2963 920 : let archival_size = TIMELINE_ARCHIVE_SIZE
2964 920 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2965 920 : .unwrap();
2966 920 :
2967 920 : let layers_per_read = LAYERS_PER_READ
2968 920 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2969 920 : .unwrap();
2970 920 :
2971 920 : let standby_horizon_gauge = STANDBY_HORIZON
2972 920 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2973 920 : .unwrap();
2974 920 : let resident_physical_size_gauge = RESIDENT_PHYSICAL_SIZE
2975 920 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2976 920 : .unwrap();
2977 920 : let visible_physical_size_gauge = VISIBLE_PHYSICAL_SIZE
2978 920 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2979 920 : .unwrap();
2980 920 : // TODO: we shouldn't expose this metric
2981 920 : let current_logical_size_gauge = CURRENT_LOGICAL_SIZE
2982 920 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2983 920 : .unwrap();
2984 920 : let aux_file_size_gauge = AUX_FILE_SIZE
2985 920 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2986 920 : .unwrap();
2987 920 :         // TODO use impl Trait syntax here once we have the ability to use it: https://github.com/rust-lang/rust/issues/63065
2988 920 : let directory_entries_count_gauge_closure = {
2989 920 : let tenant_shard_id = *tenant_shard_id;
2990 920 : let timeline_id_raw = *timeline_id_raw;
2991 0 : move || {
2992 0 : let tenant_id = tenant_shard_id.tenant_id.to_string();
2993 0 : let shard_id = format!("{}", tenant_shard_id.shard_slug());
2994 0 : let timeline_id = timeline_id_raw.to_string();
2995 0 : let gauge: UIntGauge = DIRECTORY_ENTRIES_COUNT
2996 0 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2997 0 : .unwrap();
2998 0 : gauge
2999 0 : }
3000 : };
3001 920 : let directory_entries_count_gauge: Lazy<UIntGauge, Box<dyn Send + Fn() -> UIntGauge>> =
3002 920 : Lazy::new(Box::new(directory_entries_count_gauge_closure));
3003 920 : let evictions = EVICTIONS
3004 920 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3005 920 : .unwrap();
3006 920 : let evictions_with_low_residence_duration = evictions_with_low_residence_duration_builder
3007 920 : .build(&tenant_id, &shard_id, &timeline_id);
3008 920 :
3009 920 : let valid_lsn_lease_count_gauge = VALID_LSN_LEASE_COUNT
3010 920 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3011 920 : .unwrap();
3012 920 :
3013 920 : let wal_records_received = PAGESERVER_TIMELINE_WAL_RECORDS_RECEIVED
3014 920 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3015 920 : .unwrap();
3016 920 :
3017 920 : let storage_io_size = StorageIoSizeMetrics::new(&tenant_id, &shard_id, &timeline_id);
3018 920 :
3019 920 : let wait_lsn_in_progress_micros = GlobalAndPerTenantIntCounter {
3020 920 : global: WAIT_LSN_IN_PROGRESS_GLOBAL_MICROS.clone(),
3021 920 : per_tenant: WAIT_LSN_IN_PROGRESS_MICROS
3022 920 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3023 920 : .unwrap(),
3024 920 : };
3025 920 :
3026 920 : let wait_lsn_start_finish_counterpair = WAIT_LSN_START_FINISH_COUNTERPAIR
3027 920 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3028 920 : .unwrap();
3029 920 :
3030 920 : TimelineMetrics {
3031 920 : tenant_id,
3032 920 : shard_id,
3033 920 : timeline_id,
3034 920 : flush_time_histo,
3035 920 : flush_delay_histo,
3036 920 : compact_time_histo,
3037 920 : create_images_time_histo,
3038 920 : logical_size_histo,
3039 920 : imitate_logical_size_histo,
3040 920 : garbage_collect_histo,
3041 920 : find_gc_cutoffs_histo,
3042 920 : load_layer_map_histo,
3043 920 : last_record_lsn_gauge,
3044 920 : disk_consistent_lsn_gauge,
3045 920 : pitr_history_size,
3046 920 : archival_size,
3047 920 : layers_per_read,
3048 920 : standby_horizon_gauge,
3049 920 : resident_physical_size_gauge,
3050 920 : visible_physical_size_gauge,
3051 920 : current_logical_size_gauge,
3052 920 : aux_file_size_gauge,
3053 920 : directory_entries_count_gauge,
3054 920 : evictions,
3055 920 : evictions_with_low_residence_duration: std::sync::RwLock::new(
3056 920 : evictions_with_low_residence_duration,
3057 920 : ),
3058 920 : storage_io_size,
3059 920 : valid_lsn_lease_count_gauge,
3060 920 : wal_records_received,
3061 920 : wait_lsn_in_progress_micros,
3062 920 : wait_lsn_start_finish_counterpair,
3063 920 : shutdown: std::sync::atomic::AtomicBool::default(),
3064 920 : }
3065 920 : }
3066 :
3067 3160 : pub(crate) fn record_new_file_metrics(&self, sz: u64) {
3068 3160 : self.resident_physical_size_add(sz);
3069 3160 : }
3070 :
3071 1090 : pub(crate) fn resident_physical_size_sub(&self, sz: u64) {
3072 1090 : self.resident_physical_size_gauge.sub(sz);
3073 1090 : crate::metrics::RESIDENT_PHYSICAL_SIZE_GLOBAL.sub(sz);
3074 1090 : }
3075 :
3076 3432 : pub(crate) fn resident_physical_size_add(&self, sz: u64) {
3077 3432 : self.resident_physical_size_gauge.add(sz);
3078 3432 : crate::metrics::RESIDENT_PHYSICAL_SIZE_GLOBAL.add(sz);
3079 3432 : }
3080 :
3081 20 : pub(crate) fn resident_physical_size_get(&self) -> u64 {
3082 20 : self.resident_physical_size_gauge.get()
3083 20 : }
3084 :
3085 : /// Generates TIMELINE_LAYER labels for a persistent layer.
3086 5250 : fn make_layer_labels(&self, layer_desc: &PersistentLayerDesc) -> [&str; 5] {
3087 5250 : let level = match LayerMap::is_l0(&layer_desc.key_range, layer_desc.is_delta()) {
3088 2847 : true => LayerLevel::L0,
3089 2403 : false => LayerLevel::L1,
3090 : };
3091 5250 : let kind = match layer_desc.is_delta() {
3092 4361 : true => LayerKind::Delta,
3093 889 : false => LayerKind::Image,
3094 : };
3095 5250 : [
3096 5250 : &self.tenant_id,
3097 5250 : &self.shard_id,
3098 5250 : &self.timeline_id,
3099 5250 : level.into(),
3100 5250 : kind.into(),
3101 5250 : ]
3102 5250 : }
3103 :
3104 : /// Generates TIMELINE_LAYER labels for a frozen ephemeral layer.
3105 4720 : fn make_frozen_layer_labels(&self, _layer: &InMemoryLayer) -> [&str; 5] {
3106 4720 : [
3107 4720 : &self.tenant_id,
3108 4720 : &self.shard_id,
3109 4720 : &self.timeline_id,
3110 4720 : LayerLevel::Frozen.into(),
3111 4720 : LayerKind::Delta.into(), // by definition
3112 4720 : ]
3113 4720 : }
3114 :
3115 :     /// Removes a frozen ephemeral layer from TIMELINE_LAYER metrics.
3116 2360 : pub fn dec_frozen_layer(&self, layer: &InMemoryLayer) {
3117 2360 : assert!(matches!(layer.info(), InMemoryLayerInfo::Frozen { .. }));
3118 2360 : let labels = self.make_frozen_layer_labels(layer);
3119 2360 : let size = layer.try_len().expect("frozen layer should have no writer");
3120 2360 : TIMELINE_LAYER_COUNT
3121 2360 : .get_metric_with_label_values(&labels)
3122 2360 : .unwrap()
3123 2360 : .dec();
3124 2360 : TIMELINE_LAYER_SIZE
3125 2360 : .get_metric_with_label_values(&labels)
3126 2360 : .unwrap()
3127 2360 : .sub(size);
3128 2360 : }
3129 :
3130 : /// Adds a frozen ephemeral layer to TIMELINE_LAYER metrics.
3131 2360 : pub fn inc_frozen_layer(&self, layer: &InMemoryLayer) {
3132 2360 : assert!(matches!(layer.info(), InMemoryLayerInfo::Frozen { .. }));
3133 2360 : let labels = self.make_frozen_layer_labels(layer);
3134 2360 : let size = layer.try_len().expect("frozen layer should have no writer");
3135 2360 : TIMELINE_LAYER_COUNT
3136 2360 : .get_metric_with_label_values(&labels)
3137 2360 : .unwrap()
3138 2360 : .inc();
3139 2360 : TIMELINE_LAYER_SIZE
3140 2360 : .get_metric_with_label_values(&labels)
3141 2360 : .unwrap()
3142 2360 : .add(size);
3143 2360 : }
3144 :
3145 : /// Removes a persistent layer from TIMELINE_LAYER metrics.
3146 1386 : pub fn dec_layer(&self, layer_desc: &PersistentLayerDesc) {
3147 1386 : let labels = self.make_layer_labels(layer_desc);
3148 1386 : TIMELINE_LAYER_COUNT
3149 1386 : .get_metric_with_label_values(&labels)
3150 1386 : .unwrap()
3151 1386 : .dec();
3152 1386 : TIMELINE_LAYER_SIZE
3153 1386 : .get_metric_with_label_values(&labels)
3154 1386 : .unwrap()
3155 1386 : .sub(layer_desc.file_size);
3156 1386 : }
3157 :
3158 : /// Adds a persistent layer to TIMELINE_LAYER metrics.
3159 3864 : pub fn inc_layer(&self, layer_desc: &PersistentLayerDesc) {
3160 3864 : let labels = self.make_layer_labels(layer_desc);
3161 3864 : TIMELINE_LAYER_COUNT
3162 3864 : .get_metric_with_label_values(&labels)
3163 3864 : .unwrap()
3164 3864 : .inc();
3165 3864 : TIMELINE_LAYER_SIZE
3166 3864 : .get_metric_with_label_values(&labels)
3167 3864 : .unwrap()
3168 3864 : .add(layer_desc.file_size);
3169 3864 : }
3170 :
3171 20 : pub(crate) fn shutdown(&self) {
3172 20 : let was_shutdown = self
3173 20 : .shutdown
3174 20 : .swap(true, std::sync::atomic::Ordering::Relaxed);
3175 20 :
3176 20 : if was_shutdown {
3177 :             // This happens on tenant deletion because the tenant first shuts down its timelines, then
3178 :             // invokes timeline deletion, which as its first step shuts down the timeline again.
3179 :             // TODO: this can be removed once https://github.com/neondatabase/neon/issues/5080 is fixed.
3180 0 : return;
3181 20 : }
3182 20 :
3183 20 : let tenant_id = &self.tenant_id;
3184 20 : let timeline_id = &self.timeline_id;
3185 20 : let shard_id = &self.shard_id;
3186 20 : let _ = LAST_RECORD_LSN.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3187 20 : let _ = DISK_CONSISTENT_LSN.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3188 20 : let _ = STANDBY_HORIZON.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3189 20 : {
3190 20 : RESIDENT_PHYSICAL_SIZE_GLOBAL.sub(self.resident_physical_size_get());
3191 20 : let _ = RESIDENT_PHYSICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3192 20 : }
3193 20 : let _ = VISIBLE_PHYSICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3194 20 : let _ = CURRENT_LOGICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3195 20 : if let Some(metric) = Lazy::get(&DIRECTORY_ENTRIES_COUNT) {
3196 0 : let _ = metric.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3197 20 : }
3198 :
3199 20 : let _ = TIMELINE_ARCHIVE_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3200 20 : let _ = PITR_HISTORY_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3201 :
3202 80 : for ref level in LayerLevel::iter() {
3203 180 : for ref kind in LayerKind::iter() {
3204 120 : let labels: [&str; 5] =
3205 120 : [tenant_id, shard_id, timeline_id, level.into(), kind.into()];
3206 120 : let _ = TIMELINE_LAYER_SIZE.remove_label_values(&labels);
3207 120 : let _ = TIMELINE_LAYER_COUNT.remove_label_values(&labels);
3208 120 : }
3209 : }
3210 :
3211 20 : let _ = LAYERS_PER_READ.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3212 20 :
3213 20 : let _ = EVICTIONS.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3214 20 : let _ = AUX_FILE_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3215 20 : let _ = VALID_LSN_LEASE_COUNT.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3216 20 :
3217 20 : self.evictions_with_low_residence_duration
3218 20 : .write()
3219 20 : .unwrap()
3220 20 : .remove(tenant_id, shard_id, timeline_id);
3221 :
3222 : // The following metrics are born outside of the TimelineMetrics lifecycle but still
3223 : // removed at the end of it. The idea is to have the metrics outlive the
3224 : // entity during which they're observed, e.g., the smgr metrics shall
3225 : // outlive an individual smgr connection, but not the timeline.
3226 :
3227 200 : for op in StorageTimeOperation::VARIANTS {
3228 180 : let _ = STORAGE_TIME_SUM_PER_TIMELINE.remove_label_values(&[
3229 180 : op,
3230 180 : tenant_id,
3231 180 : shard_id,
3232 180 : timeline_id,
3233 180 : ]);
3234 180 : let _ = STORAGE_TIME_COUNT_PER_TIMELINE.remove_label_values(&[
3235 180 : op,
3236 180 : tenant_id,
3237 180 : shard_id,
3238 180 : timeline_id,
3239 180 : ]);
3240 180 : }
3241 :
3242 60 : for op in StorageIoSizeOperation::VARIANTS {
3243 40 : let _ = STORAGE_IO_SIZE.remove_label_values(&[op, tenant_id, shard_id, timeline_id]);
3244 40 : }
3245 :
3246 : let _ =
3247 20 : WAIT_LSN_IN_PROGRESS_MICROS.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3248 20 :
3249 20 : {
3250 20 : let mut res = [Ok(()), Ok(())];
3251 20 : WAIT_LSN_START_FINISH_COUNTERPAIR
3252 20 : .remove_label_values(&mut res, &[tenant_id, shard_id, timeline_id]);
3253 20 : }
3254 20 :
3255 20 : let _ = SMGR_QUERY_STARTED_PER_TENANT_TIMELINE.remove_label_values(&[
3256 20 : SmgrQueryType::GetPageAtLsn.into(),
3257 20 : tenant_id,
3258 20 : shard_id,
3259 20 : timeline_id,
3260 20 : ]);
3261 20 : let _ = SMGR_QUERY_TIME_PER_TENANT_TIMELINE.remove_label_values(&[
3262 20 : SmgrQueryType::GetPageAtLsn.into(),
3263 20 : tenant_id,
3264 20 : shard_id,
3265 20 : timeline_id,
3266 20 : ]);
3267 20 : let _ = PAGE_SERVICE_BATCH_SIZE_PER_TENANT_TIMELINE.remove_label_values(&[
3268 20 : tenant_id,
3269 20 : shard_id,
3270 20 : timeline_id,
3271 20 : ]);
3272 20 : let _ = PAGESERVER_TIMELINE_WAL_RECORDS_RECEIVED.remove_label_values(&[
3273 20 : tenant_id,
3274 20 : shard_id,
3275 20 : timeline_id,
3276 20 : ]);
3277 20 : let _ = PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS.remove_label_values(&[
3278 20 : tenant_id,
3279 20 : shard_id,
3280 20 : timeline_id,
3281 20 : ]);
3282 20 : let _ = PAGE_SERVICE_SMGR_BATCH_WAIT_TIME.remove_label_values(&[
3283 20 : tenant_id,
3284 20 : shard_id,
3285 20 : timeline_id,
3286 20 : ]);
3287 20 : }
3288 : }
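 : // A minimal sketch of the layer accounting above (hypothetical call site and values):
 : //
 : //     metrics.inc_layer(&layer_desc); // TIMELINE_LAYER_{COUNT,SIZE} for (level, kind) += 1 / += file_size
 : //     metrics.dec_layer(&layer_desc); // ... and -= 1 / -= file_size when the layer goes away
 : //
 : // Each (level, kind) series thus tracks the live set of layers; shutdown() removes every
 : // (level, kind) label combination for the timeline rather than leaving stale series behind.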
3289 :
3290 12 : pub(crate) fn remove_tenant_metrics(tenant_shard_id: &TenantShardId) {
3291 12 : // Only shard zero deals in synthetic sizes
3292 12 : if tenant_shard_id.is_shard_zero() {
3293 12 : let tid = tenant_shard_id.tenant_id.to_string();
3294 12 : let _ = TENANT_SYNTHETIC_SIZE_METRIC.remove_label_values(&[&tid]);
3295 12 : }
3296 :
3297 12 : tenant_throttling::remove_tenant_metrics(tenant_shard_id);
3298 12 :
3299 12 : // we leave the BROKEN_TENANTS_SET entry if any
3300 12 : }
3301 :
3302 : /// Maintain a per timeline gauge in addition to the global gauge.
3303 : pub(crate) struct PerTimelineRemotePhysicalSizeGauge {
3304 : last_set: AtomicU64,
3305 : gauge: UIntGauge,
3306 : }
3307 :
3308 : impl PerTimelineRemotePhysicalSizeGauge {
3309 940 : fn new(per_timeline_gauge: UIntGauge) -> Self {
3310 940 : Self {
3311 940 : last_set: AtomicU64::new(0),
3312 940 : gauge: per_timeline_gauge,
3313 940 : }
3314 940 : }
3315 3886 : pub(crate) fn set(&self, sz: u64) {
3316 3886 : self.gauge.set(sz);
3317 3886 : let prev = self.last_set.swap(sz, std::sync::atomic::Ordering::Relaxed);
3318 3886 : if sz < prev {
3319 77 : REMOTE_PHYSICAL_SIZE_GLOBAL.sub(prev - sz);
3320 3809 : } else {
3321 3809 : REMOTE_PHYSICAL_SIZE_GLOBAL.add(sz - prev);
3322 3809 : };
3323 3886 : }
3324 4 : pub(crate) fn get(&self) -> u64 {
3325 4 : self.gauge.get()
3326 4 : }
3327 : }
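 : // A minimal sketch of the delta bookkeeping above (hypothetical values, not a real call site):
 : //
 : //     gauge.set(100); // last_set: 0 -> 100, REMOTE_PHYSICAL_SIZE_GLOBAL += 100
 : //     gauge.set(60);  // last_set: 100 -> 60, REMOTE_PHYSICAL_SIZE_GLOBAL -= 40
 : //     drop(gauge);    // Drop impl below: REMOTE_PHYSICAL_SIZE_GLOBAL -= 60
 : //
 : // so the global gauge always equals the sum of the live per-timeline gauges.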
3328 :
3329 : impl Drop for PerTimelineRemotePhysicalSizeGauge {
3330 40 : fn drop(&mut self) {
3331 40 : REMOTE_PHYSICAL_SIZE_GLOBAL.sub(self.last_set.load(std::sync::atomic::Ordering::Relaxed));
3332 40 : }
3333 : }
3334 :
3335 : pub(crate) struct RemoteTimelineClientMetrics {
3336 : tenant_id: String,
3337 : shard_id: String,
3338 : timeline_id: String,
3339 : pub(crate) remote_physical_size_gauge: PerTimelineRemotePhysicalSizeGauge,
3340 : calls: Mutex<HashMap<(&'static str, &'static str), IntCounterPair>>,
3341 : bytes_started_counter: Mutex<HashMap<(&'static str, &'static str), IntCounter>>,
3342 : bytes_finished_counter: Mutex<HashMap<(&'static str, &'static str), IntCounter>>,
3343 : pub(crate) projected_remote_consistent_lsn_gauge: UIntGauge,
3344 : }
3345 :
3346 : impl RemoteTimelineClientMetrics {
3347 940 : pub fn new(tenant_shard_id: &TenantShardId, timeline_id: &TimelineId) -> Self {
3348 940 : let tenant_id_str = tenant_shard_id.tenant_id.to_string();
3349 940 : let shard_id_str = format!("{}", tenant_shard_id.shard_slug());
3350 940 : let timeline_id_str = timeline_id.to_string();
3351 940 :
3352 940 : let remote_physical_size_gauge = PerTimelineRemotePhysicalSizeGauge::new(
3353 940 : REMOTE_PHYSICAL_SIZE
3354 940 : .get_metric_with_label_values(&[&tenant_id_str, &shard_id_str, &timeline_id_str])
3355 940 : .unwrap(),
3356 940 : );
3357 940 :
3358 940 : let projected_remote_consistent_lsn_gauge = PROJECTED_REMOTE_CONSISTENT_LSN
3359 940 : .get_metric_with_label_values(&[&tenant_id_str, &shard_id_str, &timeline_id_str])
3360 940 : .unwrap();
3361 940 :
3362 940 : RemoteTimelineClientMetrics {
3363 940 : tenant_id: tenant_id_str,
3364 940 : shard_id: shard_id_str,
3365 940 : timeline_id: timeline_id_str,
3366 940 : calls: Mutex::new(HashMap::default()),
3367 940 : bytes_started_counter: Mutex::new(HashMap::default()),
3368 940 : bytes_finished_counter: Mutex::new(HashMap::default()),
3369 940 : remote_physical_size_gauge,
3370 940 : projected_remote_consistent_lsn_gauge,
3371 940 : }
3372 940 : }
3373 :
3374 6112 : pub fn remote_operation_time(
3375 6112 : &self,
3376 6112 : file_kind: &RemoteOpFileKind,
3377 6112 : op_kind: &RemoteOpKind,
3378 6112 : status: &'static str,
3379 6112 : ) -> Histogram {
3380 6112 : let key = (file_kind.as_str(), op_kind.as_str(), status);
3381 6112 : REMOTE_OPERATION_TIME
3382 6112 : .get_metric_with_label_values(&[key.0, key.1, key.2])
3383 6112 : .unwrap()
3384 6112 : }
3385 :
3386 14371 : fn calls_counter_pair(
3387 14371 : &self,
3388 14371 : file_kind: &RemoteOpFileKind,
3389 14371 : op_kind: &RemoteOpKind,
3390 14371 : ) -> IntCounterPair {
3391 14371 : let mut guard = self.calls.lock().unwrap();
3392 14371 : let key = (file_kind.as_str(), op_kind.as_str());
3393 14371 : let metric = guard.entry(key).or_insert_with(move || {
3394 1684 : REMOTE_TIMELINE_CLIENT_CALLS
3395 1684 : .get_metric_with_label_values(&[
3396 1684 : &self.tenant_id,
3397 1684 : &self.shard_id,
3398 1684 : &self.timeline_id,
3399 1684 : key.0,
3400 1684 : key.1,
3401 1684 : ])
3402 1684 : .unwrap()
3403 14371 : });
3404 14371 : metric.clone()
3405 14371 : }
3406 :
3407 3488 : fn bytes_started_counter(
3408 3488 : &self,
3409 3488 : file_kind: &RemoteOpFileKind,
3410 3488 : op_kind: &RemoteOpKind,
3411 3488 : ) -> IntCounter {
3412 3488 : let mut guard = self.bytes_started_counter.lock().unwrap();
3413 3488 : let key = (file_kind.as_str(), op_kind.as_str());
3414 3488 : let metric = guard.entry(key).or_insert_with(move || {
3415 660 : REMOTE_TIMELINE_CLIENT_BYTES_STARTED_COUNTER
3416 660 : .get_metric_with_label_values(&[
3417 660 : &self.tenant_id,
3418 660 : &self.shard_id,
3419 660 : &self.timeline_id,
3420 660 : key.0,
3421 660 : key.1,
3422 660 : ])
3423 660 : .unwrap()
3424 3488 : });
3425 3488 : metric.clone()
3426 3488 : }
3427 :
3428 6566 : fn bytes_finished_counter(
3429 6566 : &self,
3430 6566 : file_kind: &RemoteOpFileKind,
3431 6566 : op_kind: &RemoteOpKind,
3432 6566 : ) -> IntCounter {
3433 6566 : let mut guard = self.bytes_finished_counter.lock().unwrap();
3434 6566 : let key = (file_kind.as_str(), op_kind.as_str());
3435 6566 : let metric = guard.entry(key).or_insert_with(move || {
3436 660 : REMOTE_TIMELINE_CLIENT_BYTES_FINISHED_COUNTER
3437 660 : .get_metric_with_label_values(&[
3438 660 : &self.tenant_id,
3439 660 : &self.shard_id,
3440 660 : &self.timeline_id,
3441 660 : key.0,
3442 660 : key.1,
3443 660 : ])
3444 660 : .unwrap()
3445 6566 : });
3446 6566 : metric.clone()
3447 6566 : }
3448 : }
3449 :
3450 : #[cfg(test)]
3451 : impl RemoteTimelineClientMetrics {
3452 12 : pub fn get_bytes_started_counter_value(
3453 12 : &self,
3454 12 : file_kind: &RemoteOpFileKind,
3455 12 : op_kind: &RemoteOpKind,
3456 12 : ) -> Option<u64> {
3457 12 : let guard = self.bytes_started_counter.lock().unwrap();
3458 12 : let key = (file_kind.as_str(), op_kind.as_str());
3459 12 : guard.get(&key).map(|counter| counter.get())
3460 12 : }
3461 :
3462 12 : pub fn get_bytes_finished_counter_value(
3463 12 : &self,
3464 12 : file_kind: &RemoteOpFileKind,
3465 12 : op_kind: &RemoteOpKind,
3466 12 : ) -> Option<u64> {
3467 12 : let guard = self.bytes_finished_counter.lock().unwrap();
3468 12 : let key = (file_kind.as_str(), op_kind.as_str());
3469 12 : guard.get(&key).map(|counter| counter.get())
3470 12 : }
3471 : }
3472 :
3473 : /// See [`RemoteTimelineClientMetrics::call_begin`].
3474 : #[must_use]
3475 : pub(crate) struct RemoteTimelineClientCallMetricGuard {
3476 : /// Decremented on drop.
3477 : calls_counter_pair: Option<IntCounterPair>,
3478 : /// If Some(), this references the bytes_finished metric, and we increment it by the given `u64` on drop.
3479 : bytes_finished: Option<(IntCounter, u64)>,
3480 : }
3481 :
3482 : impl RemoteTimelineClientCallMetricGuard {
3483 : /// Consume this guard object without performing the metric updates it would do on `drop()`.
3484 : /// The caller vouches to do the metric updates manually.
3485 7626 : pub fn will_decrement_manually(mut self) {
3486 7626 : let RemoteTimelineClientCallMetricGuard {
3487 7626 : calls_counter_pair,
3488 7626 : bytes_finished,
3489 7626 : } = &mut self;
3490 7626 : calls_counter_pair.take();
3491 7626 : bytes_finished.take();
3492 7626 : }
3493 : }
3494 :
3495 : impl Drop for RemoteTimelineClientCallMetricGuard {
3496 7694 : fn drop(&mut self) {
3497 7694 : let RemoteTimelineClientCallMetricGuard {
3498 7694 : calls_counter_pair,
3499 7694 : bytes_finished,
3500 7694 : } = self;
3501 7694 : if let Some(guard) = calls_counter_pair.take() {
3502 68 : guard.dec();
3503 7626 : }
3504 7694 : if let Some((bytes_finished_metric, value)) = bytes_finished {
3505 0 : bytes_finished_metric.inc_by(*value);
3506 7694 : }
3507 7694 : }
3508 : }
3509 :
3510 : /// The enum variants communicate to the [`RemoteTimelineClientMetrics`] whether to
3511 : /// track the byte size of this call in applicable metric(s).
3512 : pub(crate) enum RemoteTimelineClientMetricsCallTrackSize {
3513 : /// Do not account for this call's byte size in any metrics.
3514 : /// The `reason` field is there to make the call sites self-documenting
3515 : /// about why they don't need the metric.
3516 : DontTrackSize { reason: &'static str },
3517 : /// Track the byte size of the call in applicable metric(s).
3518 : Bytes(u64),
3519 : }
3520 :
3521 : impl RemoteTimelineClientMetrics {
3522 : /// Update the metrics that change when a call to the remote timeline client instance starts.
3523 : ///
3524 : /// Drop the returned guard object once the operation is finished to updates corresponding metrics that track completions.
3525 :     /// Drop the returned guard object once the operation is finished to update the corresponding metrics that track completions.
3526 : /// is more suitable.
3527 : /// Never do both.
3528 7694 : pub(crate) fn call_begin(
3529 7694 : &self,
3530 7694 : file_kind: &RemoteOpFileKind,
3531 7694 : op_kind: &RemoteOpKind,
3532 7694 : size: RemoteTimelineClientMetricsCallTrackSize,
3533 7694 : ) -> RemoteTimelineClientCallMetricGuard {
3534 7694 : let calls_counter_pair = self.calls_counter_pair(file_kind, op_kind);
3535 7694 : calls_counter_pair.inc();
3536 :
3537 7694 : let bytes_finished = match size {
3538 4206 : RemoteTimelineClientMetricsCallTrackSize::DontTrackSize { reason: _reason } => {
3539 4206 : // nothing to do
3540 4206 : None
3541 : }
3542 3488 : RemoteTimelineClientMetricsCallTrackSize::Bytes(size) => {
3543 3488 : self.bytes_started_counter(file_kind, op_kind).inc_by(size);
3544 3488 : let finished_counter = self.bytes_finished_counter(file_kind, op_kind);
3545 3488 : Some((finished_counter, size))
3546 : }
3547 : };
3548 7694 : RemoteTimelineClientCallMetricGuard {
3549 7694 : calls_counter_pair: Some(calls_counter_pair),
3550 7694 : bytes_finished,
3551 7694 : }
3552 7694 : }
3553 :
3554 : /// Manually udpate the metrics that track completions, instead of using the guard object.
3555 :     /// Manually update the metrics that track completions, instead of using the guard object.
3556 : /// See [`call_begin`](Self::call_begin) for more context.
3557 6677 : pub(crate) fn call_end(
3558 6677 : &self,
3559 6677 : file_kind: &RemoteOpFileKind,
3560 6677 : op_kind: &RemoteOpKind,
3561 6677 : size: RemoteTimelineClientMetricsCallTrackSize,
3562 6677 : ) {
3563 6677 : let calls_counter_pair = self.calls_counter_pair(file_kind, op_kind);
3564 6677 : calls_counter_pair.dec();
3565 6677 : match size {
3566 3599 : RemoteTimelineClientMetricsCallTrackSize::DontTrackSize { reason: _reason } => {}
3567 3078 : RemoteTimelineClientMetricsCallTrackSize::Bytes(size) => {
3568 3078 : self.bytes_finished_counter(file_kind, op_kind).inc_by(size);
3569 3078 : }
3570 : }
3571 6677 : }
3572 : }
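 : // A sketch of the intended call protocol (hypothetical call site; variant names assumed):
 : //
 : //     let guard = metrics.call_begin(
 : //         &RemoteOpFileKind::Layer,
 : //         &RemoteOpKind::Upload,
 : //         RemoteTimelineClientMetricsCallTrackSize::Bytes(len),
 : //     );
 : //     // ... perform the operation ...
 : //     drop(guard); // decrements the in-progress pair and adds `len` to bytes_finished
 : //
 : // or, when completion is recorded at a different place than the start:
 : //
 : //     guard.will_decrement_manually();
 : //     // ... later ...
 : //     metrics.call_end(&file_kind, &op_kind, RemoteTimelineClientMetricsCallTrackSize::Bytes(len));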
3573 :
3574 : impl Drop for RemoteTimelineClientMetrics {
3575 40 : fn drop(&mut self) {
3576 40 : let RemoteTimelineClientMetrics {
3577 40 : tenant_id,
3578 40 : shard_id,
3579 40 : timeline_id,
3580 40 : remote_physical_size_gauge,
3581 40 : calls,
3582 40 : bytes_started_counter,
3583 40 : bytes_finished_counter,
3584 40 : projected_remote_consistent_lsn_gauge,
3585 40 : } = self;
3586 48 : for ((a, b), _) in calls.get_mut().unwrap().drain() {
3587 48 : let mut res = [Ok(()), Ok(())];
3588 48 : REMOTE_TIMELINE_CLIENT_CALLS
3589 48 : .remove_label_values(&mut res, &[tenant_id, shard_id, timeline_id, a, b]);
3590 48 : // don't care about results
3591 48 : }
3592 40 : for ((a, b), _) in bytes_started_counter.get_mut().unwrap().drain() {
3593 12 : let _ = REMOTE_TIMELINE_CLIENT_BYTES_STARTED_COUNTER.remove_label_values(&[
3594 12 : tenant_id,
3595 12 : shard_id,
3596 12 : timeline_id,
3597 12 : a,
3598 12 : b,
3599 12 : ]);
3600 12 : }
3601 40 : for ((a, b), _) in bytes_finished_counter.get_mut().unwrap().drain() {
3602 12 : let _ = REMOTE_TIMELINE_CLIENT_BYTES_FINISHED_COUNTER.remove_label_values(&[
3603 12 : tenant_id,
3604 12 : shard_id,
3605 12 : timeline_id,
3606 12 : a,
3607 12 : b,
3608 12 : ]);
3609 12 : }
3610 40 : {
3611 40 :             let _ = remote_physical_size_gauge; // used to avoid 'unused' warning in the destructuring above
3612 40 : let _ = REMOTE_PHYSICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3613 40 : }
3614 40 : {
3615 40 : let _ = projected_remote_consistent_lsn_gauge;
3616 40 : let _ = PROJECTED_REMOTE_CONSISTENT_LSN.remove_label_values(&[
3617 40 : tenant_id,
3618 40 : shard_id,
3619 40 : timeline_id,
3620 40 : ]);
3621 40 : }
3622 40 : }
3623 : }
3624 :
3625 : /// Wrapper future that measures the time spent by a remote storage operation,
3626 : /// and records the time and success/failure as a prometheus metric.
3627 : pub(crate) trait MeasureRemoteOp: Sized {
3628 6422 : fn measure_remote_op(
3629 6422 : self,
3630 6422 : file_kind: RemoteOpFileKind,
3631 6422 : op: RemoteOpKind,
3632 6422 : metrics: Arc<RemoteTimelineClientMetrics>,
3633 6422 : ) -> MeasuredRemoteOp<Self> {
3634 6422 : let start = Instant::now();
3635 6422 : MeasuredRemoteOp {
3636 6422 : inner: self,
3637 6422 : file_kind,
3638 6422 : op,
3639 6422 : start,
3640 6422 : metrics,
3641 6422 : }
3642 6422 : }
3643 : }
3644 :
3645 : impl<T: Sized> MeasureRemoteOp for T {}
3646 :
3647 : pin_project! {
3648 : pub(crate) struct MeasuredRemoteOp<F>
3649 : {
3650 : #[pin]
3651 : inner: F,
3652 : file_kind: RemoteOpFileKind,
3653 : op: RemoteOpKind,
3654 : start: Instant,
3655 : metrics: Arc<RemoteTimelineClientMetrics>,
3656 : }
3657 : }
3658 :
3659 : impl<F: Future<Output = Result<O, E>>, O, E> Future for MeasuredRemoteOp<F> {
3660 : type Output = Result<O, E>;
3661 :
3662 91386 : fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
3663 91386 : let this = self.project();
3664 91386 : let poll_result = this.inner.poll(cx);
3665 91386 : if let Poll::Ready(ref res) = poll_result {
3666 6112 : let duration = this.start.elapsed();
3667 6112 : let status = if res.is_ok() { &"success" } else { &"failure" };
3668 6112 : this.metrics
3669 6112 : .remote_operation_time(this.file_kind, this.op, status)
3670 6112 : .observe(duration.as_secs_f64());
3671 85274 : }
3672 91386 : poll_result
3673 91386 : }
3674 : }
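 : // A sketch of how the wrapper is used (hypothetical call site; variant names assumed):
 : //
 : //     let res = download_future
 : //         .measure_remote_op(RemoteOpFileKind::Layer, RemoteOpKind::Download, metrics.clone())
 : //         .await;
 : //
 : // The elapsed time is observed under status "success" or "failure" depending on the Result,
 : // measured from construction of the wrapper rather than from its first poll.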
3675 :
3676 : pub mod tokio_epoll_uring {
3677 : use std::collections::HashMap;
3678 : use std::sync::{Arc, Mutex};
3679 :
3680 : use metrics::{Histogram, LocalHistogram, UIntGauge, register_histogram, register_int_counter};
3681 : use once_cell::sync::Lazy;
3682 :
3683 : /// Shared storage for tokio-epoll-uring thread local metrics.
3684 : pub(crate) static THREAD_LOCAL_METRICS_STORAGE: Lazy<ThreadLocalMetricsStorage> =
3685 240 : Lazy::new(|| {
3686 240 : let slots_submission_queue_depth = register_histogram!(
3687 240 : "pageserver_tokio_epoll_uring_slots_submission_queue_depth",
3688 240 : "The slots waiters queue depth of each tokio_epoll_uring system",
3689 240 : vec![
3690 240 : 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0
3691 240 : ],
3692 240 : )
3693 240 : .expect("failed to define a metric");
3694 240 : ThreadLocalMetricsStorage {
3695 240 : observers: Mutex::new(HashMap::new()),
3696 240 : slots_submission_queue_depth,
3697 240 : }
3698 240 : });
3699 :
3700 : pub struct ThreadLocalMetricsStorage {
3701 : /// List of thread local metrics observers.
3702 : observers: Mutex<HashMap<u64, Arc<ThreadLocalMetrics>>>,
3703 : /// A histogram shared between all thread local systems
3704 : /// for collecting slots submission queue depth.
3705 : slots_submission_queue_depth: Histogram,
3706 : }
3707 :
3708 : /// Each thread-local [`tokio_epoll_uring::System`] gets one of these as its
3709 : /// [`tokio_epoll_uring::metrics::PerSystemMetrics`] generic.
3710 : ///
3711 : /// The System makes observations into [`Self`] and periodically, the collector
3712 : /// comes along and flushes [`Self`] into the shared storage [`THREAD_LOCAL_METRICS_STORAGE`].
3713 : ///
3714 : /// [`LocalHistogram`] is `!Send`, so, we need to put it behind a [`Mutex`].
3715 : /// But except for the periodic flush, the lock is uncontended so there's no waiting
3716 :     /// for the cache coherence protocol to get an exclusive cache line.
3717 : pub struct ThreadLocalMetrics {
3718 : /// Local observer of thread local tokio-epoll-uring system's slots waiters queue depth.
3719 : slots_submission_queue_depth: Mutex<LocalHistogram>,
3720 : }
3721 :
3722 : impl ThreadLocalMetricsStorage {
3723 : /// Registers a new thread local system. Returns a thread local metrics observer.
3724 1061 : pub fn register_system(&self, id: u64) -> Arc<ThreadLocalMetrics> {
3725 1061 : let per_system_metrics = Arc::new(ThreadLocalMetrics::new(
3726 1061 : self.slots_submission_queue_depth.local(),
3727 1061 : ));
3728 1061 : let mut g = self.observers.lock().unwrap();
3729 1061 : g.insert(id, Arc::clone(&per_system_metrics));
3730 1061 : per_system_metrics
3731 1061 : }
3732 :
3733 : /// Removes metrics observer for a thread local system.
3734 : /// This should be called before dropping a thread local system.
3735 240 : pub fn remove_system(&self, id: u64) {
3736 240 : let mut g = self.observers.lock().unwrap();
3737 240 : g.remove(&id);
3738 240 : }
3739 :
3740 : /// Flush all thread local metrics to the shared storage.
3741 0 : pub fn flush_thread_local_metrics(&self) {
3742 0 : let g = self.observers.lock().unwrap();
3743 0 : g.values().for_each(|local| {
3744 0 : local.flush();
3745 0 : });
3746 0 : }
3747 : }
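 :     // A sketch of the per-system lifecycle (hypothetical; `system_id` is assumed unique):
 :     //
 :     //     let local = THREAD_LOCAL_METRICS_STORAGE.register_system(system_id);
 :     //     // the thread-local system observes into `local` ...
 :     //     THREAD_LOCAL_METRICS_STORAGE.flush_thread_local_metrics(); // done by the Collector
 :     //     THREAD_LOCAL_METRICS_STORAGE.remove_system(system_id);     // before dropping the system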
3748 :
3749 : impl ThreadLocalMetrics {
3750 1061 : pub fn new(slots_submission_queue_depth: LocalHistogram) -> Self {
3751 1061 : ThreadLocalMetrics {
3752 1061 : slots_submission_queue_depth: Mutex::new(slots_submission_queue_depth),
3753 1061 : }
3754 1061 : }
3755 :
3756 : /// Flushes the thread local metrics to shared aggregator.
3757 0 : pub fn flush(&self) {
3758 0 : let Self {
3759 0 : slots_submission_queue_depth,
3760 0 : } = self;
3761 0 : slots_submission_queue_depth.lock().unwrap().flush();
3762 0 : }
3763 : }
3764 :
3765 : impl tokio_epoll_uring::metrics::PerSystemMetrics for ThreadLocalMetrics {
3766 1819497 : fn observe_slots_submission_queue_depth(&self, queue_depth: u64) {
3767 1819497 : let Self {
3768 1819497 : slots_submission_queue_depth,
3769 1819497 : } = self;
3770 1819497 : slots_submission_queue_depth
3771 1819497 : .lock()
3772 1819497 : .unwrap()
3773 1819497 : .observe(queue_depth as f64);
3774 1819497 : }
3775 : }
3776 :
3777 : pub struct Collector {
3778 : descs: Vec<metrics::core::Desc>,
3779 : systems_created: UIntGauge,
3780 : systems_destroyed: UIntGauge,
3781 : thread_local_metrics_storage: &'static ThreadLocalMetricsStorage,
3782 : }
3783 :
3784 : impl metrics::core::Collector for Collector {
3785 0 : fn desc(&self) -> Vec<&metrics::core::Desc> {
3786 0 : self.descs.iter().collect()
3787 0 : }
3788 :
3789 0 : fn collect(&self) -> Vec<metrics::proto::MetricFamily> {
3790 0 : let mut mfs = Vec::with_capacity(Self::NMETRICS);
3791 0 : let tokio_epoll_uring::metrics::GlobalMetrics {
3792 0 : systems_created,
3793 0 : systems_destroyed,
3794 0 : } = tokio_epoll_uring::metrics::global();
3795 0 : self.systems_created.set(systems_created);
3796 0 : mfs.extend(self.systems_created.collect());
3797 0 : self.systems_destroyed.set(systems_destroyed);
3798 0 : mfs.extend(self.systems_destroyed.collect());
3799 0 :
3800 0 : self.thread_local_metrics_storage
3801 0 : .flush_thread_local_metrics();
3802 0 :
3803 0 : mfs.extend(
3804 0 : self.thread_local_metrics_storage
3805 0 : .slots_submission_queue_depth
3806 0 : .collect(),
3807 0 : );
3808 0 : mfs
3809 0 : }
3810 : }
3811 :
3812 : impl Collector {
3813 : const NMETRICS: usize = 3;
3814 :
3815 : #[allow(clippy::new_without_default)]
3816 0 : pub fn new() -> Self {
3817 0 : let mut descs = Vec::new();
3818 0 :
3819 0 : let systems_created = UIntGauge::new(
3820 0 : "pageserver_tokio_epoll_uring_systems_created",
3821 0 : "counter of tokio-epoll-uring systems that were created",
3822 0 : )
3823 0 : .unwrap();
3824 0 : descs.extend(
3825 0 : metrics::core::Collector::desc(&systems_created)
3826 0 : .into_iter()
3827 0 : .cloned(),
3828 0 : );
3829 0 :
3830 0 : let systems_destroyed = UIntGauge::new(
3831 0 : "pageserver_tokio_epoll_uring_systems_destroyed",
3832 0 : "counter of tokio-epoll-uring systems that were destroyed",
3833 0 : )
3834 0 : .unwrap();
3835 0 : descs.extend(
3836 0 : metrics::core::Collector::desc(&systems_destroyed)
3837 0 : .into_iter()
3838 0 : .cloned(),
3839 0 : );
3840 0 :
3841 0 : Self {
3842 0 : descs,
3843 0 : systems_created,
3844 0 : systems_destroyed,
3845 0 : thread_local_metrics_storage: &THREAD_LOCAL_METRICS_STORAGE,
3846 0 : }
3847 0 : }
3848 : }
3849 :
3850 240 : pub(crate) static THREAD_LOCAL_LAUNCH_SUCCESSES: Lazy<metrics::IntCounter> = Lazy::new(|| {
3851 240 : register_int_counter!(
3852 240 : "pageserver_tokio_epoll_uring_pageserver_thread_local_launch_success_count",
3853 240 :         "Number of times thread_local_system creation spanned multiple executor threads",
3854 240 : )
3855 240 : .unwrap()
3856 240 : });
3857 :
3858 0 : pub(crate) static THREAD_LOCAL_LAUNCH_FAILURES: Lazy<metrics::IntCounter> = Lazy::new(|| {
3859 0 : register_int_counter!(
3860 0 : "pageserver_tokio_epoll_uring_pageserver_thread_local_launch_failures_count",
3861 0 : "Number of times thread_local_system creation failed and was retried after back-off.",
3862 0 : )
3863 0 : .unwrap()
3864 0 : });
3865 : }
3866 :
3867 : pub(crate) struct GlobalAndPerTenantIntCounter {
3868 : global: IntCounter,
3869 : per_tenant: IntCounter,
3870 : }
3871 :
3872 : impl GlobalAndPerTenantIntCounter {
3873 : #[inline(always)]
3874 0 : pub(crate) fn inc(&self) {
3875 0 : self.inc_by(1)
3876 0 : }
3877 : #[inline(always)]
3878 450454 : pub(crate) fn inc_by(&self, n: u64) {
3879 450454 : self.global.inc_by(n);
3880 450454 : self.per_tenant.inc_by(n);
3881 450454 : }
3882 : }
3883 :
3884 : pub(crate) mod tenant_throttling {
3885 : use metrics::register_int_counter_vec;
3886 : use once_cell::sync::Lazy;
3887 : use utils::shard::TenantShardId;
3888 :
3889 : use super::GlobalAndPerTenantIntCounter;
3890 :
3891 : pub(crate) struct Metrics<const KIND: usize> {
3892 : pub(super) count_accounted_start: GlobalAndPerTenantIntCounter,
3893 : pub(super) count_accounted_finish: GlobalAndPerTenantIntCounter,
3894 : pub(super) wait_time: GlobalAndPerTenantIntCounter,
3895 : pub(super) count_throttled: GlobalAndPerTenantIntCounter,
3896 : }
3897 :
3898 420 : static COUNT_ACCOUNTED_START: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3899 420 : register_int_counter_vec!(
3900 420 : "pageserver_tenant_throttling_count_accounted_start_global",
3901 420 : "Count of tenant throttling starts, by kind of throttle.",
3902 420 : &["kind"]
3903 420 : )
3904 420 : .unwrap()
3905 420 : });
3906 420 : static COUNT_ACCOUNTED_START_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3907 420 : register_int_counter_vec!(
3908 420 : "pageserver_tenant_throttling_count_accounted_start",
3909 420 : "Count of tenant throttling starts, by kind of throttle.",
3910 420 : &["kind", "tenant_id", "shard_id"]
3911 420 : )
3912 420 : .unwrap()
3913 420 : });
3914 420 : static COUNT_ACCOUNTED_FINISH: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3915 420 : register_int_counter_vec!(
3916 420 : "pageserver_tenant_throttling_count_accounted_finish_global",
3917 420 : "Count of tenant throttling finishes, by kind of throttle.",
3918 420 : &["kind"]
3919 420 : )
3920 420 : .unwrap()
3921 420 : });
3922 420 : static COUNT_ACCOUNTED_FINISH_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3923 420 : register_int_counter_vec!(
3924 420 : "pageserver_tenant_throttling_count_accounted_finish",
3925 420 : "Count of tenant throttling finishes, by kind of throttle.",
3926 420 : &["kind", "tenant_id", "shard_id"]
3927 420 : )
3928 420 : .unwrap()
3929 420 : });
3930 420 : static WAIT_USECS: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3931 420 : register_int_counter_vec!(
3932 420 : "pageserver_tenant_throttling_wait_usecs_sum_global",
3933 420 :         "Sum of microseconds spent waiting on the throttle, by kind of throttle.",
3934 420 : &["kind"]
3935 420 : )
3936 420 : .unwrap()
3937 420 : });
3938 420 : static WAIT_USECS_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3939 420 : register_int_counter_vec!(
3940 420 : "pageserver_tenant_throttling_wait_usecs_sum",
3941 420 :         "Sum of microseconds spent waiting on the throttle, by kind of throttle.",
3942 420 : &["kind", "tenant_id", "shard_id"]
3943 420 : )
3944 420 : .unwrap()
3945 420 : });
3946 :
3947 420 : static WAIT_COUNT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3948 420 : register_int_counter_vec!(
3949 420 : "pageserver_tenant_throttling_count_global",
3950 420 : "Count of tenant throttlings, by kind of throttle.",
3951 420 : &["kind"]
3952 420 : )
3953 420 : .unwrap()
3954 420 : });
3955 420 : static WAIT_COUNT_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3956 420 : register_int_counter_vec!(
3957 420 : "pageserver_tenant_throttling_count",
3958 420 : "Count of tenant throttlings, by kind of throttle.",
3959 420 : &["kind", "tenant_id", "shard_id"]
3960 420 : )
3961 420 : .unwrap()
3962 420 : });
3963 :
3964 : const KINDS: &[&str] = &["pagestream"];
3965 : pub type Pagestream = Metrics<0>;
3966 :
3967 : impl<const KIND: usize> Metrics<KIND> {
3968 456 : pub(crate) fn new(tenant_shard_id: &TenantShardId) -> Self {
3969 456 : let per_tenant_label_values = &[
3970 456 : KINDS[KIND],
3971 456 : &tenant_shard_id.tenant_id.to_string(),
3972 456 : &tenant_shard_id.shard_slug().to_string(),
3973 456 : ];
3974 456 : Metrics {
3975 456 : count_accounted_start: {
3976 456 : GlobalAndPerTenantIntCounter {
3977 456 : global: COUNT_ACCOUNTED_START.with_label_values(&[KINDS[KIND]]),
3978 456 : per_tenant: COUNT_ACCOUNTED_START_PER_TENANT
3979 456 : .with_label_values(per_tenant_label_values),
3980 456 : }
3981 456 : },
3982 456 : count_accounted_finish: {
3983 456 : GlobalAndPerTenantIntCounter {
3984 456 : global: COUNT_ACCOUNTED_FINISH.with_label_values(&[KINDS[KIND]]),
3985 456 : per_tenant: COUNT_ACCOUNTED_FINISH_PER_TENANT
3986 456 : .with_label_values(per_tenant_label_values),
3987 456 : }
3988 456 : },
3989 456 : wait_time: {
3990 456 : GlobalAndPerTenantIntCounter {
3991 456 : global: WAIT_USECS.with_label_values(&[KINDS[KIND]]),
3992 456 : per_tenant: WAIT_USECS_PER_TENANT
3993 456 : .with_label_values(per_tenant_label_values),
3994 456 : }
3995 456 : },
3996 456 : count_throttled: {
3997 456 : GlobalAndPerTenantIntCounter {
3998 456 : global: WAIT_COUNT.with_label_values(&[KINDS[KIND]]),
3999 456 : per_tenant: WAIT_COUNT_PER_TENANT
4000 456 : .with_label_values(per_tenant_label_values),
4001 456 : }
4002 456 : },
4003 456 : }
4004 456 : }
4005 : }
4006 :
4007 0 : pub(crate) fn preinitialize_global_metrics() {
4008 0 : Lazy::force(&COUNT_ACCOUNTED_START);
4009 0 : Lazy::force(&COUNT_ACCOUNTED_FINISH);
4010 0 : Lazy::force(&WAIT_USECS);
4011 0 : Lazy::force(&WAIT_COUNT);
4012 0 : }
4013 :
4014 12 : pub(crate) fn remove_tenant_metrics(tenant_shard_id: &TenantShardId) {
4015 48 : for m in &[
4016 12 : &COUNT_ACCOUNTED_START_PER_TENANT,
4017 12 : &COUNT_ACCOUNTED_FINISH_PER_TENANT,
4018 12 : &WAIT_USECS_PER_TENANT,
4019 12 : &WAIT_COUNT_PER_TENANT,
4020 12 : ] {
4021 96 : for kind in KINDS {
4022 48 : let _ = m.remove_label_values(&[
4023 48 : kind,
4024 48 : &tenant_shard_id.tenant_id.to_string(),
4025 48 : &tenant_shard_id.shard_slug().to_string(),
4026 48 : ]);
4027 48 : }
4028 : }
4029 12 : }
4030 : }
4031 :
4032 : pub(crate) mod disk_usage_based_eviction {
4033 : use super::*;
4034 :
4035 : pub(crate) struct Metrics {
4036 : pub(crate) tenant_collection_time: Histogram,
4037 : pub(crate) tenant_layer_count: Histogram,
4038 : pub(crate) layers_collected: IntCounter,
4039 : pub(crate) layers_selected: IntCounter,
4040 : pub(crate) layers_evicted: IntCounter,
4041 : }
4042 :
4043 : impl Default for Metrics {
4044 0 : fn default() -> Self {
4045 0 : let tenant_collection_time = register_histogram!(
4046 0 : "pageserver_disk_usage_based_eviction_tenant_collection_seconds",
4047 0 : "Time spent collecting layers from a tenant -- not normalized by collected layer amount",
4048 0 : vec![0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1.0, 5.0, 10.0]
4049 0 : )
4050 0 : .unwrap();
4051 0 :
4052 0 : let tenant_layer_count = register_histogram!(
4053 0 : "pageserver_disk_usage_based_eviction_tenant_collected_layers",
4054 0 : "Amount of layers gathered from a tenant",
4055 0 : vec![5.0, 50.0, 500.0, 5000.0, 50000.0]
4056 0 : )
4057 0 : .unwrap();
4058 0 :
4059 0 : let layers_collected = register_int_counter!(
4060 0 : "pageserver_disk_usage_based_eviction_collected_layers_total",
4061 0 : "Amount of layers collected"
4062 0 : )
4063 0 : .unwrap();
4064 0 :
4065 0 : let layers_selected = register_int_counter!(
4066 0 : "pageserver_disk_usage_based_eviction_select_layers_total",
4067 0 : "Amount of layers selected"
4068 0 : )
4069 0 : .unwrap();
4070 0 :
4071 0 : let layers_evicted = register_int_counter!(
4072 0 : "pageserver_disk_usage_based_eviction_evicted_layers_total",
4073 0 : "Amount of layers successfully evicted"
4074 0 : )
4075 0 : .unwrap();
4076 0 :
4077 0 : Self {
4078 0 : tenant_collection_time,
4079 0 : tenant_layer_count,
4080 0 : layers_collected,
4081 0 : layers_selected,
4082 0 : layers_evicted,
4083 0 : }
4084 0 : }
4085 : }
4086 :
4087 : pub(crate) static METRICS: Lazy<Metrics> = Lazy::new(Metrics::default);
4088 : }
4089 :
4090 408 : static TOKIO_EXECUTOR_THREAD_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
4091 408 : register_uint_gauge_vec!(
4092 408 : "pageserver_tokio_executor_thread_configured_count",
4093 408 :         "Total number of configured tokio executor threads in the process.
4094 408 : The `setup` label denotes whether we're running with multiple runtimes or a single runtime.",
4095 408 : &["setup"],
4096 408 : )
4097 408 : .unwrap()
4098 408 : });
4099 :
4100 408 : pub(crate) fn set_tokio_runtime_setup(setup: &str, num_threads: NonZeroUsize) {
4101 : static SERIALIZE: std::sync::Mutex<()> = std::sync::Mutex::new(());
4102 408 : let _guard = SERIALIZE.lock().unwrap();
4103 408 : TOKIO_EXECUTOR_THREAD_COUNT.reset();
4104 408 : TOKIO_EXECUTOR_THREAD_COUNT
4105 408 : .get_metric_with_label_values(&[setup])
4106 408 : .unwrap()
4107 408 : .set(u64::try_from(num_threads.get()).unwrap());
4108 408 : }
4109 :
4110 0 : pub fn preinitialize_metrics(conf: &'static PageServerConf) {
4111 0 : set_page_service_config_max_batch_size(&conf.page_service_pipelining);
4112 0 :
4113 0 :     // Python tests need these, and we alert on some of them.
4114 0 : //
4115 0 : // FIXME(4813): make it so that we have no top level metrics as this fn will easily fall out of
4116 0 : // order:
4117 0 : // - global metrics reside in a Lazy<PageserverMetrics>
4118 0 : // - access via crate::metrics::PS_METRICS.some_metric.inc()
4119 0 : // - could move the statics into TimelineMetrics::new()?
4120 0 :
4121 0 : // counters
4122 0 : [
4123 0 : &UNEXPECTED_ONDEMAND_DOWNLOADS,
4124 0 : &WALRECEIVER_STARTED_CONNECTIONS,
4125 0 : &WALRECEIVER_BROKER_UPDATES,
4126 0 : &WALRECEIVER_CANDIDATES_ADDED,
4127 0 : &WALRECEIVER_CANDIDATES_REMOVED,
4128 0 : &tokio_epoll_uring::THREAD_LOCAL_LAUNCH_FAILURES,
4129 0 : &tokio_epoll_uring::THREAD_LOCAL_LAUNCH_SUCCESSES,
4130 0 : &REMOTE_ONDEMAND_DOWNLOADED_LAYERS,
4131 0 : &REMOTE_ONDEMAND_DOWNLOADED_BYTES,
4132 0 : &CIRCUIT_BREAKERS_BROKEN,
4133 0 : &CIRCUIT_BREAKERS_UNBROKEN,
4134 0 : &PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS_GLOBAL,
4135 0 : &WAIT_LSN_IN_PROGRESS_GLOBAL_MICROS,
4136 0 : ]
4137 0 : .into_iter()
4138 0 : .for_each(|c| {
4139 0 : Lazy::force(c);
4140 0 : });
4141 0 :
4142 0 : // Deletion queue stats
4143 0 : Lazy::force(&DELETION_QUEUE);
4144 0 :
4145 0 : // Tenant stats
4146 0 : Lazy::force(&TENANT);
4147 0 :
4148 0 : // Tenant manager stats
4149 0 : Lazy::force(&TENANT_MANAGER);
4150 0 :
4151 0 : Lazy::force(&crate::tenant::storage_layer::layer::LAYER_IMPL_METRICS);
4152 0 : Lazy::force(&disk_usage_based_eviction::METRICS);
4153 :
4154 0 : for state_name in pageserver_api::models::TenantState::VARIANTS {
4155 0 : // initialize the metric for all gauges, otherwise the time series might seemingly show
4156 0 : // values from last restart.
4157 0 : TENANT_STATE_METRIC.with_label_values(&[state_name]).set(0);
4158 0 : }
4159 :
4160 : // countervecs
4161 0 : [
4162 0 : &BACKGROUND_LOOP_PERIOD_OVERRUN_COUNT,
4163 0 : &SMGR_QUERY_STARTED_GLOBAL,
4164 0 : ]
4165 0 : .into_iter()
4166 0 : .for_each(|c| {
4167 0 : Lazy::force(c);
4168 0 : });
4169 0 :
4170 0 : // gauges
4171 0 : WALRECEIVER_ACTIVE_MANAGERS.get();
4172 0 :
4173 0 : // histograms
4174 0 : [
4175 0 : &LAYERS_PER_READ_GLOBAL,
4176 0 : &LAYERS_PER_READ_BATCH_GLOBAL,
4177 0 : &LAYERS_PER_READ_AMORTIZED_GLOBAL,
4178 0 : &DELTAS_PER_READ_GLOBAL,
4179 0 : &WAIT_LSN_TIME,
4180 0 : &WAL_REDO_TIME,
4181 0 : &WAL_REDO_RECORDS_HISTOGRAM,
4182 0 : &WAL_REDO_BYTES_HISTOGRAM,
4183 0 : &WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM,
4184 0 : &PAGE_SERVICE_BATCH_SIZE_GLOBAL,
4185 0 : &PAGE_SERVICE_SMGR_BATCH_WAIT_TIME_GLOBAL,
4186 0 : ]
4187 0 : .into_iter()
4188 0 : .for_each(|h| {
4189 0 : Lazy::force(h);
4190 0 : });
4191 0 :
4192 0 : // Custom
4193 0 : Lazy::force(&BASEBACKUP_QUERY_TIME);
4194 0 : Lazy::force(&COMPUTE_COMMANDS_COUNTERS);
4195 0 : Lazy::force(&tokio_epoll_uring::THREAD_LOCAL_METRICS_STORAGE);
4196 0 :
4197 0 : tenant_throttling::preinitialize_global_metrics();
4198 0 : }
|