Line data Source code
1 : use std::collections::HashMap;
2 : use std::num::NonZeroUsize;
3 : use std::os::fd::RawFd;
4 : use std::sync::atomic::AtomicU64;
5 : use std::sync::{Arc, Mutex};
6 : use std::time::{Duration, Instant};
7 :
8 : use enum_map::{Enum as _, EnumMap};
9 : use futures::Future;
10 : use metrics::{
11 : Counter, CounterVec, GaugeVec, Histogram, HistogramVec, IntCounter, IntCounterPair,
12 : IntCounterPairVec, IntCounterVec, IntGauge, IntGaugeVec, UIntGauge, UIntGaugeVec,
13 : register_counter_vec, register_gauge_vec, register_histogram, register_histogram_vec,
14 : register_int_counter, register_int_counter_pair_vec, register_int_counter_vec,
15 : register_int_gauge, register_int_gauge_vec, register_uint_gauge, register_uint_gauge_vec,
16 : };
17 : use once_cell::sync::Lazy;
18 : use pageserver_api::config::{
19 : PageServicePipeliningConfig, PageServicePipeliningConfigPipelined,
20 : PageServiceProtocolPipelinedBatchingStrategy, PageServiceProtocolPipelinedExecutionStrategy,
21 : };
22 : use pageserver_api::models::InMemoryLayerInfo;
23 : use pageserver_api::shard::TenantShardId;
24 : use postgres_backend::{QueryError, is_expected_io_error};
25 : use pq_proto::framed::ConnectionError;
26 : use strum::{EnumCount, IntoEnumIterator as _, VariantNames};
27 : use strum_macros::{IntoStaticStr, VariantNames};
28 : use utils::id::TimelineId;
29 :
30 : use crate::config;
31 : use crate::config::PageServerConf;
32 : use crate::context::{PageContentKind, RequestContext};
33 : use crate::pgdatadir_mapping::DatadirModificationStats;
34 : use crate::task_mgr::TaskKind;
35 : use crate::tenant::Timeline;
36 : use crate::tenant::layer_map::LayerMap;
37 : use crate::tenant::mgr::TenantSlot;
38 : use crate::tenant::storage_layer::{InMemoryLayer, PersistentLayerDesc};
39 : use crate::tenant::tasks::BackgroundLoopKind;
40 : use crate::tenant::throttle::ThrottleResult;
41 :
42 : /// Prometheus histogram buckets (in seconds) for operations in the critical
43 : /// path. In other words, operations that directly affect the latency of user
44 : /// queries.
45 : ///
46 : /// The buckets capture the majority of latencies in the microsecond and
47 : /// millisecond range but also extend far enough up to distinguish "bad" from
48 : /// "really bad".
49 : const CRITICAL_OP_BUCKETS: &[f64] = &[
50 : 0.000_001, 0.000_010, 0.000_100, // 1 us, 10 us, 100 us
51 : 0.001_000, 0.010_000, 0.100_000, // 1 ms, 10 ms, 100 ms
52 : 1.0, 10.0, 100.0, // 1 s, 10 s, 100 s
53 : ];
54 :
55 : // Metrics collected on operations on the storage repository.
56 : #[derive(Debug, VariantNames, IntoStaticStr)]
57 : #[strum(serialize_all = "kebab_case")]
58 : pub(crate) enum StorageTimeOperation {
59 : #[strum(serialize = "layer flush")]
60 : LayerFlush,
61 :
62 : #[strum(serialize = "layer flush delay")]
63 : LayerFlushDelay,
64 :
65 : #[strum(serialize = "compact")]
66 : Compact,
67 :
68 : #[strum(serialize = "create images")]
69 : CreateImages,
70 :
71 : #[strum(serialize = "logical size")]
72 : LogicalSize,
73 :
74 : #[strum(serialize = "imitate logical size")]
75 : ImitateLogicalSize,
76 :
77 : #[strum(serialize = "load layer map")]
78 : LoadLayerMap,
79 :
80 : #[strum(serialize = "gc")]
81 : Gc,
82 :
83 : #[strum(serialize = "find gc cutoffs")]
84 : FindGcCutoffs,
85 : }
86 :
87 107 : pub(crate) static STORAGE_TIME_SUM_PER_TIMELINE: Lazy<CounterVec> = Lazy::new(|| {
88 107 : register_counter_vec!(
89 107 : "pageserver_storage_operations_seconds_sum",
90 107 : "Total time spent on storage operations with operation, tenant and timeline dimensions",
91 107 : &["operation", "tenant_id", "shard_id", "timeline_id"],
92 107 : )
93 107 : .expect("failed to define a metric")
94 107 : });
95 :
96 107 : pub(crate) static STORAGE_TIME_COUNT_PER_TIMELINE: Lazy<IntCounterVec> = Lazy::new(|| {
97 107 : register_int_counter_vec!(
98 107 : "pageserver_storage_operations_seconds_count",
99 107 : "Count of storage operations with operation, tenant and timeline dimensions",
100 107 : &["operation", "tenant_id", "shard_id", "timeline_id"],
101 107 : )
102 107 : .expect("failed to define a metric")
103 107 : });
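// Illustrative sketch (not from the original source): callers typically bump the
// sum/count pair together, so that `sum / count` yields a mean latency per label
// set. `elapsed`, `tenant_id`, `shard_id`, and `timeline_id` are assumed bindings.
//
//     let op: &'static str = StorageTimeOperation::Compact.into();
//     STORAGE_TIME_SUM_PER_TIMELINE
//         .with_label_values(&[op, tenant_id, shard_id, timeline_id])
//         .inc_by(elapsed.as_secs_f64());
//     STORAGE_TIME_COUNT_PER_TIMELINE
//         .with_label_values(&[op, tenant_id, shard_id, timeline_id])
//         .inc();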
104 :
105 : // Buckets for background operation duration in seconds, like compaction, GC, size calculation.
106 : const STORAGE_OP_BUCKETS: &[f64] = &[0.010, 0.100, 1.0, 10.0, 100.0, 1000.0];
107 :
108 107 : pub(crate) static STORAGE_TIME_GLOBAL: Lazy<HistogramVec> = Lazy::new(|| {
109 107 : register_histogram_vec!(
110 107 : "pageserver_storage_operations_seconds_global",
111 107 : "Time spent on storage operations",
112 107 : &["operation"],
113 107 : STORAGE_OP_BUCKETS.into(),
114 107 : )
115 107 : .expect("failed to define a metric")
116 107 : });
117 :
118 : /// Measures layers visited per read (i.e. read amplification).
119 : ///
120 : /// NB: for a batch, we count all visited layers towards each read. While the cost of layer visits
121 : /// is amortized across the batch, and some layers may not intersect with a given key, each visited
122 : /// layer contributes directly to the observed latency for every read in the batch, which is what we
123 : /// care about.
124 107 : pub(crate) static LAYERS_PER_READ: Lazy<HistogramVec> = Lazy::new(|| {
125 107 : register_histogram_vec!(
126 107 : "pageserver_layers_per_read",
127 107 : "Layers visited to serve a single read (read amplification). In a batch, all visited layers count towards every read.",
128 107 : &["tenant_id", "shard_id", "timeline_id"],
129 107 : // Low resolution to reduce cardinality.
130 107 : vec![4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0],
131 107 : )
132 107 : .expect("failed to define a metric")
133 107 : });
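// Hedged example of the batch accounting described above (assumed bindings
// `batch_reads` and `layers_visited`): every read in a batch observes the full
// layer count, while the amortized variant below divides it out.
//
//     for _read in 0..batch_reads {
//         LAYERS_PER_READ_GLOBAL.observe(layers_visited as f64);
//     }
//     LAYERS_PER_READ_BATCH_GLOBAL.observe(layers_visited as f64);
//     LAYERS_PER_READ_AMORTIZED_GLOBAL.observe(layers_visited as f64 / batch_reads as f64);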
134 :
135 105 : pub(crate) static LAYERS_PER_READ_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
136 105 : register_histogram!(
137 105 : "pageserver_layers_per_read_global",
138 105 : "Layers visited to serve a single read (read amplification). In a batch, all visited layers count towards every read.",
139 105 : vec![1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0],
140 105 : )
141 105 : .expect("failed to define a metric")
142 105 : });
143 :
144 105 : pub(crate) static LAYERS_PER_READ_BATCH_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
145 105 : register_histogram!(
146 105 : "pageserver_layers_per_read_batch_global",
147 105 : "Layers visited to serve a single read batch (read amplification), regardless of number of reads.",
148 105 : vec![
149 105 : 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0
150 105 : ],
151 105 : )
152 105 : .expect("failed to define a metric")
153 105 : });
154 :
155 105 : pub(crate) static LAYERS_PER_READ_AMORTIZED_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
156 105 : register_histogram!(
157 105 : "pageserver_layers_per_read_amortized_global",
158 105 : "Layers visited to serve a single read (read amplification). Amortized across a batch: \
159 105 : all visited layers are divided by number of reads.",
160 105 : vec![
161 105 : 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0
162 105 : ],
163 105 : )
164 105 : .expect("failed to define a metric")
165 105 : });
166 :
167 105 : pub(crate) static DELTAS_PER_READ_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
168 105 : // We expect this to be low because of Postgres checkpoints. Let's see if that holds.
169 105 : register_histogram!(
170 105 : "pageserver_deltas_per_read_global",
171 105 : "Number of delta pages applied to an image page per read",
172 105 : vec![0.0, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0],
173 105 : )
174 105 : .expect("failed to define a metric")
175 105 : });
176 :
177 0 : pub(crate) static CONCURRENT_INITDBS: Lazy<UIntGauge> = Lazy::new(|| {
178 0 : register_uint_gauge!(
179 0 : "pageserver_concurrent_initdb",
180 0 : "Number of initdb processes running"
181 0 : )
182 0 : .expect("failed to define a metric")
183 0 : });
184 :
185 0 : pub(crate) static INITDB_SEMAPHORE_ACQUISITION_TIME: Lazy<Histogram> = Lazy::new(|| {
186 0 : register_histogram!(
187 0 : "pageserver_initdb_semaphore_seconds_global",
188 0 : "Time spent getting a permit from the global initdb semaphore",
189 0 : STORAGE_OP_BUCKETS.into()
190 0 : )
191 0 : .expect("failed to define metric")
192 0 : });
193 :
194 0 : pub(crate) static INITDB_RUN_TIME: Lazy<Histogram> = Lazy::new(|| {
195 0 : register_histogram!(
196 0 : "pageserver_initdb_seconds_global",
197 0 : "Time spent performing initdb",
198 0 : STORAGE_OP_BUCKETS.into()
199 0 : )
200 0 : .expect("failed to define metric")
201 0 : });
202 :
203 : pub(crate) struct GetVectoredLatency {
204 : map: EnumMap<TaskKind, Option<Histogram>>,
205 : }
206 :
207 : #[allow(dead_code)]
208 : pub(crate) struct ScanLatency {
209 : map: EnumMap<TaskKind, Option<Histogram>>,
210 : }
211 :
212 : impl GetVectoredLatency {
213 : // Only these task types perform vectored gets. Filter all other tasks out to reduce total
214 : // cardinality of the metric.
215 : const TRACKED_TASK_KINDS: [TaskKind; 2] = [TaskKind::Compaction, TaskKind::PageRequestHandler];
216 :
217 10882 : pub(crate) fn for_task_kind(&self, task_kind: TaskKind) -> Option<&Histogram> {
218 10882 : self.map[task_kind].as_ref()
219 10882 : }
220 : }
221 :
222 : impl ScanLatency {
223 : // Only these task types perform scans. Filter all other tasks out to reduce total
224 : // cardinality of the metric.
225 : const TRACKED_TASK_KINDS: [TaskKind; 1] = [TaskKind::PageRequestHandler];
226 :
227 8 : pub(crate) fn for_task_kind(&self, task_kind: TaskKind) -> Option<&Histogram> {
228 8 : self.map[task_kind].as_ref()
229 8 : }
230 : }
231 :
232 : pub(crate) struct ScanLatencyOngoingRecording<'a> {
233 : parent: &'a Histogram,
234 : start: std::time::Instant,
235 : }
236 :
237 : impl<'a> ScanLatencyOngoingRecording<'a> {
238 0 : pub(crate) fn start_recording(parent: &'a Histogram) -> ScanLatencyOngoingRecording<'a> {
239 0 : let start = Instant::now();
240 0 : ScanLatencyOngoingRecording { parent, start }
241 0 : }
242 :
243 0 : pub(crate) fn observe(self) {
244 0 : let elapsed = self.start.elapsed();
245 0 : self.parent.observe(elapsed.as_secs_f64());
246 0 : }
247 : }
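// Usage sketch (illustrative, assuming a `ctx: &RequestContext` binding): record
// scan latency only for tracked task kinds.
//
//     if let Some(histogram) = SCAN_LATENCY.for_task_kind(ctx.task_kind()) {
//         let recording = ScanLatencyOngoingRecording::start_recording(histogram);
//         // ... perform the scan ...
//         recording.observe();
//     }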
248 :
249 103 : pub(crate) static GET_VECTORED_LATENCY: Lazy<GetVectoredLatency> = Lazy::new(|| {
250 103 : let inner = register_histogram_vec!(
251 103 : "pageserver_get_vectored_seconds",
252 103 : "Time spent in get_vectored.",
253 103 : &["task_kind"],
254 103 : CRITICAL_OP_BUCKETS.into(),
255 103 : )
256 103 : .expect("failed to define a metric");
257 103 :
258 103 : GetVectoredLatency {
259 3296 : map: EnumMap::from_array(std::array::from_fn(|task_kind_idx| {
260 3296 : let task_kind = TaskKind::from_usize(task_kind_idx);
261 3296 :
262 3296 : if GetVectoredLatency::TRACKED_TASK_KINDS.contains(&task_kind) {
263 206 : let task_kind = task_kind.into();
264 206 : Some(inner.with_label_values(&[task_kind]))
265 : } else {
266 3090 : None
267 : }
268 3296 : })),
269 103 : }
270 103 : });
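// Illustrative sketch (not from the original source): untracked task kinds get
// `None` and are skipped, keeping metric cardinality bounded. `started_at` is an
// assumed `Instant` binding.
//
//     if let Some(histogram) = GET_VECTORED_LATENCY.for_task_kind(ctx.task_kind()) {
//         histogram.observe(started_at.elapsed().as_secs_f64());
//     }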
271 :
272 3 : pub(crate) static SCAN_LATENCY: Lazy<ScanLatency> = Lazy::new(|| {
273 3 : let inner = register_histogram_vec!(
274 3 : "pageserver_scan_seconds",
275 3 : "Time spent in scan.",
276 3 : &["task_kind"],
277 3 : CRITICAL_OP_BUCKETS.into(),
278 3 : )
279 3 : .expect("failed to define a metric");
280 3 :
281 3 : ScanLatency {
282 96 : map: EnumMap::from_array(std::array::from_fn(|task_kind_idx| {
283 96 : let task_kind = TaskKind::from_usize(task_kind_idx);
284 96 :
285 96 : if ScanLatency::TRACKED_TASK_KINDS.contains(&task_kind) {
286 3 : let task_kind = task_kind.into();
287 3 : Some(inner.with_label_values(&[task_kind]))
288 : } else {
289 93 : None
290 : }
291 96 : })),
292 3 : }
293 3 : });
294 :
295 : pub(crate) struct PageCacheMetricsForTaskKind {
296 : pub read_accesses_immutable: IntCounter,
297 : pub read_hits_immutable: IntCounter,
298 : }
299 :
300 : pub(crate) struct PageCacheMetrics {
301 : map: EnumMap<TaskKind, EnumMap<PageContentKind, PageCacheMetricsForTaskKind>>,
302 : }
303 :
304 50 : static PAGE_CACHE_READ_HITS: Lazy<IntCounterVec> = Lazy::new(|| {
305 50 : register_int_counter_vec!(
306 50 : "pageserver_page_cache_read_hits_total",
307 50 : "Number of read accesses to the page cache that hit",
308 50 : &["task_kind", "key_kind", "content_kind", "hit_kind"]
309 50 : )
310 50 : .expect("failed to define a metric")
311 50 : });
312 :
313 50 : static PAGE_CACHE_READ_ACCESSES: Lazy<IntCounterVec> = Lazy::new(|| {
314 50 : register_int_counter_vec!(
315 50 : "pageserver_page_cache_read_accesses_total",
316 50 : "Number of read accesses to the page cache",
317 50 : &["task_kind", "key_kind", "content_kind"]
318 50 : )
319 50 : .expect("failed to define a metric")
320 50 : });
321 :
322 50 : pub(crate) static PAGE_CACHE: Lazy<PageCacheMetrics> = Lazy::new(|| PageCacheMetrics {
323 1600 : map: EnumMap::from_array(std::array::from_fn(|task_kind| {
324 1600 : let task_kind = TaskKind::from_usize(task_kind);
325 1600 : let task_kind: &'static str = task_kind.into();
326 12800 : EnumMap::from_array(std::array::from_fn(|content_kind| {
327 12800 : let content_kind = PageContentKind::from_usize(content_kind);
328 12800 : let content_kind: &'static str = content_kind.into();
329 12800 : PageCacheMetricsForTaskKind {
330 12800 : read_accesses_immutable: {
331 12800 : PAGE_CACHE_READ_ACCESSES
332 12800 : .get_metric_with_label_values(&[task_kind, "immutable", content_kind])
333 12800 : .unwrap()
334 12800 : },
335 12800 :
336 12800 : read_hits_immutable: {
337 12800 : PAGE_CACHE_READ_HITS
338 12800 : .get_metric_with_label_values(&[task_kind, "immutable", content_kind, "-"])
339 12800 : .unwrap()
340 12800 : },
341 12800 : }
342 12800 : }))
343 1600 : })),
344 50 : });
345 :
346 : impl PageCacheMetrics {
347 561514 : pub(crate) fn for_ctx(&self, ctx: &RequestContext) -> &PageCacheMetricsForTaskKind {
348 561514 : &self.map[ctx.task_kind()][ctx.page_content_kind()]
349 561514 : }
350 : }
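// Usage sketch (assumed bindings `ctx: &RequestContext` and `hit: bool`): the
// per-(task kind, content kind) counters are pre-resolved at startup, so the hot
// path is just an increment.
//
//     let metrics = PAGE_CACHE.for_ctx(ctx);
//     metrics.read_accesses_immutable.inc();
//     if hit {
//         metrics.read_hits_immutable.inc();
//     }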
351 :
352 : pub(crate) struct PageCacheSizeMetrics {
353 : pub max_bytes: UIntGauge,
354 :
355 : pub current_bytes_immutable: UIntGauge,
356 : }
357 :
358 50 : static PAGE_CACHE_SIZE_CURRENT_BYTES: Lazy<UIntGaugeVec> = Lazy::new(|| {
359 50 : register_uint_gauge_vec!(
360 50 : "pageserver_page_cache_size_current_bytes",
361 50 : "Current size of the page cache in bytes, by key kind",
362 50 : &["key_kind"]
363 50 : )
364 50 : .expect("failed to define a metric")
365 50 : });
366 :
367 : pub(crate) static PAGE_CACHE_SIZE: Lazy<PageCacheSizeMetrics> =
368 50 : Lazy::new(|| PageCacheSizeMetrics {
369 50 : max_bytes: {
370 50 : register_uint_gauge!(
371 50 : "pageserver_page_cache_size_max_bytes",
372 50 : "Maximum size of the page cache in bytes"
373 50 : )
374 50 : .expect("failed to define a metric")
375 50 : },
376 50 : current_bytes_immutable: {
377 50 : PAGE_CACHE_SIZE_CURRENT_BYTES
378 50 : .get_metric_with_label_values(&["immutable"])
379 50 : .unwrap()
380 50 : },
381 50 : });
382 :
383 : pub(crate) mod page_cache_eviction_metrics {
384 : use std::num::NonZeroUsize;
385 :
386 : use metrics::{IntCounter, IntCounterVec, register_int_counter_vec};
387 : use once_cell::sync::Lazy;
388 :
389 : #[derive(Clone, Copy)]
390 : pub(crate) enum Outcome {
391 : FoundSlotUnused { iters: NonZeroUsize },
392 : FoundSlotEvicted { iters: NonZeroUsize },
393 : ItersExceeded { iters: NonZeroUsize },
394 : }
395 :
396 50 : static ITERS_TOTAL_VEC: Lazy<IntCounterVec> = Lazy::new(|| {
397 50 : register_int_counter_vec!(
398 50 : "pageserver_page_cache_find_victim_iters_total",
399 50 : "Counter for the number of iterations in the find_victim loop",
400 50 : &["outcome"],
401 50 : )
402 50 : .expect("failed to define a metric")
403 50 : });
404 :
405 50 : static CALLS_VEC: Lazy<IntCounterVec> = Lazy::new(|| {
406 50 : register_int_counter_vec!(
407 50 : "pageserver_page_cache_find_victim_calls",
408 50 : "Incremented at the end of each find_victim() call. \
409 50 : Filter by outcome to get e.g., eviction rate.",
410 50 : &["outcome"]
411 50 : )
412 50 : .unwrap()
413 50 : });
414 :
415 15399 : pub(crate) fn observe(outcome: Outcome) {
416 : macro_rules! dry {
417 : ($label:literal, $iters:expr) => {{
418 : static LABEL: &'static str = $label;
419 : static ITERS_TOTAL: Lazy<IntCounter> =
420 59 : Lazy::new(|| ITERS_TOTAL_VEC.with_label_values(&[LABEL]));
421 : static CALLS: Lazy<IntCounter> =
422 59 : Lazy::new(|| CALLS_VEC.with_label_values(&[LABEL]));
423 : ITERS_TOTAL.inc_by(($iters.get()) as u64);
424 : CALLS.inc();
425 : }};
426 : }
427 15399 : match outcome {
428 820 : Outcome::FoundSlotUnused { iters } => dry!("found_empty", iters),
429 14579 : Outcome::FoundSlotEvicted { iters } => {
430 14579 : dry!("found_evicted", iters)
431 : }
432 0 : Outcome::ItersExceeded { iters } => {
433 0 : dry!("err_iters_exceeded", iters);
434 0 : super::page_cache_errors_inc(super::PageCacheErrorKind::EvictIterLimit);
435 0 : }
436 : }
437 15399 : }
438 : }
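// Illustrative call site (assumed binding `iters: NonZeroUsize`): the eviction
// loop reports its outcome once per find_victim() call.
//
//     page_cache_eviction_metrics::observe(
//         page_cache_eviction_metrics::Outcome::FoundSlotEvicted { iters },
//     );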
439 :
440 0 : static PAGE_CACHE_ERRORS: Lazy<IntCounterVec> = Lazy::new(|| {
441 0 : register_int_counter_vec!(
442 0 : "page_cache_errors_total",
443 0 : "Number of errors in the page cache, by error kind",
444 0 : &["error_kind"]
445 0 : )
446 0 : .expect("failed to define a metric")
447 0 : });
448 :
449 : #[derive(IntoStaticStr)]
450 : #[strum(serialize_all = "kebab_case")]
451 : pub(crate) enum PageCacheErrorKind {
452 : AcquirePinnedSlotTimeout,
453 : EvictIterLimit,
454 : }
455 :
456 0 : pub(crate) fn page_cache_errors_inc(error_kind: PageCacheErrorKind) {
457 0 : PAGE_CACHE_ERRORS
458 0 : .get_metric_with_label_values(&[error_kind.into()])
459 0 : .unwrap()
460 0 : .inc();
461 0 : }
462 :
463 11 : pub(crate) static WAIT_LSN_TIME: Lazy<Histogram> = Lazy::new(|| {
464 11 : register_histogram!(
465 11 : "pageserver_wait_lsn_seconds",
466 11 : "Time spent waiting for WAL to arrive. Updated on completion of the wait_lsn operation.",
467 11 : CRITICAL_OP_BUCKETS.into(),
468 11 : )
469 11 : .expect("failed to define a metric")
470 11 : });
471 :
472 107 : pub(crate) static WAIT_LSN_START_FINISH_COUNTERPAIR: Lazy<IntCounterPairVec> = Lazy::new(|| {
473 107 : register_int_counter_pair_vec!(
474 107 : "pageserver_wait_lsn_started_count",
475 107 : "Number of wait_lsn operations started.",
476 107 : "pageserver_wait_lsn_finished_count",
477 107 : "Number of wait_lsn operations finished.",
478 107 : &["tenant_id", "shard_id", "timeline_id"],
479 107 : )
480 107 : .expect("failed to define a metric")
481 107 : });
482 :
483 107 : pub(crate) static WAIT_LSN_IN_PROGRESS_MICROS: Lazy<IntCounterVec> = Lazy::new(|| {
484 107 : register_int_counter_vec!(
485 107 : "pageserver_wait_lsn_in_progress_micros",
486 107 : "Time spent waiting for WAL to arrive, by timeline_id. Updated periodically while waiting.",
487 107 : &["tenant_id", "shard_id", "timeline_id"],
488 107 : )
489 107 : .expect("failed to define a metric")
490 107 : });
491 :
492 107 : pub(crate) static WAIT_LSN_IN_PROGRESS_GLOBAL_MICROS: Lazy<IntCounter> = Lazy::new(|| {
493 107 : register_int_counter!(
494 107 : "pageserver_wait_lsn_in_progress_micros_global",
495 107 : "Time spent waiting for WAL to arrive, globally. Updated periodically while waiting."
496 107 : )
497 107 : .expect("failed to define a metric")
498 107 : });
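// Sketch of the "updated periodically while waiting" pattern the two counters
// above imply (assumed bindings `per_timeline` and `last_tick`): incrementing
// during the wait, rather than at completion, keeps long waits visible.
//
//     let elapsed_micros = last_tick.elapsed().as_micros() as u64;
//     WAIT_LSN_IN_PROGRESS_GLOBAL_MICROS.inc_by(elapsed_micros);
//     per_timeline.inc_by(elapsed_micros); // a WAIT_LSN_IN_PROGRESS_MICROS child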
499 :
500 3 : pub(crate) static ONDEMAND_DOWNLOAD_BYTES: Lazy<IntCounterVec> = Lazy::new(|| {
501 3 : register_int_counter_vec!(
502 3 : "pageserver_ondemand_download_bytes_total",
503 3 : "Total bytes of layers on-demand downloaded",
504 3 : &["task_kind"]
505 3 : )
506 3 : .expect("failed to define a metric")
507 3 : });
508 :
509 3 : pub(crate) static ONDEMAND_DOWNLOAD_COUNT: Lazy<IntCounterVec> = Lazy::new(|| {
510 3 : register_int_counter_vec!(
511 3 : "pageserver_ondemand_download_count",
512 3 : "Total count of layers on-demand downloaded",
513 3 : &["task_kind"]
514 3 : )
515 3 : .expect("failed to define a metric")
516 3 : });
517 :
518 : pub(crate) mod wait_ondemand_download_time {
519 : use super::*;
520 : const WAIT_ONDEMAND_DOWNLOAD_TIME_BUCKETS: &[f64] = &[
521 : 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, // 10 ms - 100ms
522 : 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, // 100ms to 1s
523 : 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, // 1s to 10s
524 : 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, // 10s to 1m
525 : ];
526 :
527 : /// The task kinds for which we want to track wait times for on-demand downloads.
528 : /// Other task kinds' wait times are not recorded.
529 : pub(crate) const WAIT_ONDEMAND_DOWNLOAD_METRIC_TASK_KINDS: [TaskKind; 2] = [
530 : TaskKind::PageRequestHandler,
531 : TaskKind::WalReceiverConnectionHandler,
532 : ];
533 :
534 0 : pub(crate) static WAIT_ONDEMAND_DOWNLOAD_TIME_GLOBAL: Lazy<Vec<Histogram>> = Lazy::new(|| {
535 0 : let histo = register_histogram_vec!(
536 0 : "pageserver_wait_ondemand_download_seconds_global",
537 0 : "Observations are individual tasks' wait times for on-demand downloads. \
538 0 : "If N tasks coalesce on an on-demand download, and it takes 10s, then we observe N * 10s.",
539 0 : &["task_kind"],
540 0 : WAIT_ONDEMAND_DOWNLOAD_TIME_BUCKETS.into(),
541 0 : )
542 0 : .expect("failed to define a metric");
543 0 : WAIT_ONDEMAND_DOWNLOAD_METRIC_TASK_KINDS
544 0 : .iter()
545 0 : .map(|task_kind| histo.with_label_values(&[task_kind.into()]))
546 0 : .collect::<Vec<_>>()
547 0 : });
548 :
549 107 : pub(crate) static WAIT_ONDEMAND_DOWNLOAD_TIME_SUM: Lazy<CounterVec> = Lazy::new(|| {
550 107 : register_counter_vec!(
551 107 : // use a name that _could_ be evolved into a per-timeline histogram later
552 107 : "pageserver_wait_ondemand_download_seconds_sum",
553 107 : "Like `pageserver_wait_ondemand_download_seconds_global` but per timeline",
554 107 : &["tenant_id", "shard_id", "timeline_id", "task_kind"],
555 107 : )
556 107 : .unwrap()
557 107 : });
558 :
559 : pub struct WaitOndemandDownloadTimeSum {
560 : counters: [Counter; WAIT_ONDEMAND_DOWNLOAD_METRIC_TASK_KINDS.len()],
561 : }
562 :
563 : impl WaitOndemandDownloadTimeSum {
564 233 : pub(crate) fn new(tenant_id: &str, shard_id: &str, timeline_id: &str) -> Self {
565 233 : let counters = WAIT_ONDEMAND_DOWNLOAD_METRIC_TASK_KINDS
566 233 : .iter()
567 466 : .map(|task_kind| {
568 466 : WAIT_ONDEMAND_DOWNLOAD_TIME_SUM
569 466 : .get_metric_with_label_values(&[
570 466 : tenant_id,
571 466 : shard_id,
572 466 : timeline_id,
573 466 : task_kind.into(),
574 466 : ])
575 466 : .unwrap()
576 466 : })
577 233 : .collect::<Vec<_>>();
578 233 : Self {
579 233 : counters: counters.try_into().unwrap(),
580 233 : }
581 233 : }
582 12 : pub(crate) fn observe(&self, task_kind: TaskKind, duration: Duration) {
583 12 : let maybe = WAIT_ONDEMAND_DOWNLOAD_METRIC_TASK_KINDS
584 12 : .iter()
585 12 : .enumerate()
586 24 : .find(|(_, kind)| **kind == task_kind);
587 12 : let Some((idx, _)) = maybe else {
588 12 : return;
589 : };
590 0 : WAIT_ONDEMAND_DOWNLOAD_TIME_GLOBAL[idx].observe(duration.as_secs_f64());
591 0 : let counter = &self.counters[idx];
592 0 : counter.inc_by(duration.as_secs_f64());
593 12 : }
594 : }
595 :
596 5 : pub(crate) fn shutdown_timeline(tenant_id: &str, shard_id: &str, timeline_id: &str) {
597 15 : for task_kind in WAIT_ONDEMAND_DOWNLOAD_METRIC_TASK_KINDS {
598 10 : let _ = WAIT_ONDEMAND_DOWNLOAD_TIME_SUM.remove_label_values(&[
599 10 : tenant_id,
600 10 : shard_id,
601 10 : timeline_id,
602 10 : task_kind.into(),
603 10 : ]);
604 10 : }
605 5 : }
606 :
607 0 : pub(crate) fn preinitialize_global_metrics() {
608 0 : Lazy::force(&WAIT_ONDEMAND_DOWNLOAD_TIME_GLOBAL);
609 0 : }
610 : }
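// Usage sketch for the module above (assumed string-ified id bindings and a
// `wait_duration: Duration`): per-timeline sums are created once and observed per
// download wait; untracked task kinds are dropped inside observe().
//
//     let sums = wait_ondemand_download_time::WaitOndemandDownloadTimeSum::new(
//         tenant_id, shard_id, timeline_id,
//     );
//     sums.observe(TaskKind::PageRequestHandler, wait_duration);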
611 :
612 107 : static LAST_RECORD_LSN: Lazy<IntGaugeVec> = Lazy::new(|| {
613 107 : register_int_gauge_vec!(
614 107 : "pageserver_last_record_lsn",
615 107 : "Last record LSN grouped by timeline",
616 107 : &["tenant_id", "shard_id", "timeline_id"]
617 107 : )
618 107 : .expect("failed to define a metric")
619 107 : });
620 :
621 107 : static DISK_CONSISTENT_LSN: Lazy<IntGaugeVec> = Lazy::new(|| {
622 107 : register_int_gauge_vec!(
623 107 : "pageserver_disk_consistent_lsn",
624 107 : "Disk consistent LSN grouped by timeline",
625 107 : &["tenant_id", "shard_id", "timeline_id"]
626 107 : )
627 107 : .expect("failed to define a metric")
628 107 : });
629 :
630 107 : pub(crate) static PROJECTED_REMOTE_CONSISTENT_LSN: Lazy<UIntGaugeVec> = Lazy::new(|| {
631 107 : register_uint_gauge_vec!(
632 107 : "pageserver_projected_remote_consistent_lsn",
633 107 : "Projected remote consistent LSN grouped by timeline",
634 107 : &["tenant_id", "shard_id", "timeline_id"]
635 107 : )
636 107 : .expect("failed to define a metric")
637 107 : });
638 :
639 107 : static PITR_HISTORY_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
640 107 : register_uint_gauge_vec!(
641 107 : "pageserver_pitr_history_size",
642 107 : "Data written since PITR cutoff on this timeline",
643 107 : &["tenant_id", "shard_id", "timeline_id"]
644 107 : )
645 107 : .expect("failed to define a metric")
646 107 : });
647 :
648 : #[derive(
649 60 : strum_macros::EnumIter,
650 0 : strum_macros::EnumString,
651 : strum_macros::Display,
652 : strum_macros::IntoStaticStr,
653 : )]
654 : #[strum(serialize_all = "kebab_case")]
655 : pub(crate) enum LayerKind {
656 : Delta,
657 : Image,
658 : }
659 :
660 : #[derive(
661 25 : strum_macros::EnumIter,
662 0 : strum_macros::EnumString,
663 : strum_macros::Display,
664 : strum_macros::IntoStaticStr,
665 : )]
666 : #[strum(serialize_all = "kebab_case")]
667 : pub(crate) enum LayerLevel {
668 : // We don't track the currently open ephemeral layer, since there's always exactly 1 and its
669 : // size changes. See `TIMELINE_EPHEMERAL_BYTES`.
670 : Frozen,
671 : L0,
672 : L1,
673 : }
674 :
675 105 : static TIMELINE_LAYER_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
676 105 : register_uint_gauge_vec!(
677 105 : "pageserver_layer_bytes",
678 105 : "Sum of frozen, L0, and L1 layer physical sizes in bytes (excluding the open ephemeral layer)",
679 105 : &["tenant_id", "shard_id", "timeline_id", "level", "kind"]
680 105 : )
681 105 : .expect("failed to define a metric")
682 105 : });
683 :
684 105 : static TIMELINE_LAYER_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
685 105 : register_uint_gauge_vec!(
686 105 : "pageserver_layer_count",
687 105 : "Number of frozen, L0, and L1 layers (excluding the open ephemeral layer)",
688 105 : &["tenant_id", "shard_id", "timeline_id", "level", "kind"]
689 105 : )
690 105 : .expect("failed to define a metric")
691 105 : });
692 :
693 107 : static TIMELINE_ARCHIVE_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
694 107 : register_uint_gauge_vec!(
695 107 : "pageserver_archive_size",
696 107 : "Timeline's logical size if it is considered eligible for archival (outside PITR window), else zero",
697 107 : &["tenant_id", "shard_id", "timeline_id"]
698 107 : )
699 107 : .expect("failed to define a metric")
700 107 : });
701 :
702 107 : static STANDBY_HORIZON: Lazy<IntGaugeVec> = Lazy::new(|| {
703 107 : register_int_gauge_vec!(
704 107 : "pageserver_standby_horizon",
705 107 : "Standby apply LSN for which GC is held off, by timeline.",
706 107 : &["tenant_id", "shard_id", "timeline_id"]
707 107 : )
708 107 : .expect("failed to define a metric")
709 107 : });
710 :
711 107 : static RESIDENT_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
712 107 : register_uint_gauge_vec!(
713 107 : "pageserver_resident_physical_size",
714 107 : "The size of the layer files present in the pageserver's filesystem, for attached locations.",
715 107 : &["tenant_id", "shard_id", "timeline_id"]
716 107 : )
717 107 : .expect("failed to define a metric")
718 107 : });
719 :
720 107 : static VISIBLE_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
721 107 : register_uint_gauge_vec!(
722 107 : "pageserver_visible_physical_size",
723 107 : "The size of the layer files present in the pageserver's filesystem.",
724 107 : &["tenant_id", "shard_id", "timeline_id"]
725 107 : )
726 107 : .expect("failed to define a metric")
727 107 : });
728 :
729 105 : pub(crate) static RESIDENT_PHYSICAL_SIZE_GLOBAL: Lazy<UIntGauge> = Lazy::new(|| {
730 105 : register_uint_gauge!(
731 105 : "pageserver_resident_physical_size_global",
732 105 : "Like `pageserver_resident_physical_size`, but without tenant/timeline dimensions."
733 105 : )
734 105 : .expect("failed to define a metric")
735 105 : });
736 :
737 107 : static REMOTE_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
738 107 : register_uint_gauge_vec!(
739 107 : "pageserver_remote_physical_size",
740 107 : "The size of the layer files present in the remote storage that are listed in the remote index_part.json.",
741 107 : // Corollary: If any files are missing from the index part, they won't be included here.
742 107 : &["tenant_id", "shard_id", "timeline_id"]
743 107 : )
744 107 : .expect("failed to define a metric")
745 107 : });
746 :
747 107 : static REMOTE_PHYSICAL_SIZE_GLOBAL: Lazy<UIntGauge> = Lazy::new(|| {
748 107 : register_uint_gauge!(
749 107 : "pageserver_remote_physical_size_global",
750 107 : "Like `pageserver_remote_physical_size`, but without tenant/timeline dimensions."
751 107 : )
752 107 : .expect("failed to define a metric")
753 107 : });
754 :
755 3 : pub(crate) static REMOTE_ONDEMAND_DOWNLOADED_LAYERS: Lazy<IntCounter> = Lazy::new(|| {
756 3 : register_int_counter!(
757 3 : "pageserver_remote_ondemand_downloaded_layers_total",
758 3 : "Total on-demand downloaded layers"
759 3 : )
760 3 : .unwrap()
761 3 : });
762 :
763 3 : pub(crate) static REMOTE_ONDEMAND_DOWNLOADED_BYTES: Lazy<IntCounter> = Lazy::new(|| {
764 3 : register_int_counter!(
765 3 : "pageserver_remote_ondemand_downloaded_bytes_total",
766 3 : "Total bytes of layers on-demand downloaded",
767 3 : )
768 3 : .unwrap()
769 3 : });
770 :
771 107 : static CURRENT_LOGICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
772 107 : register_uint_gauge_vec!(
773 107 : "pageserver_current_logical_size",
774 107 : "Current logical size grouped by timeline",
775 107 : &["tenant_id", "shard_id", "timeline_id"]
776 107 : )
777 107 : .expect("failed to define current logical size metric")
778 107 : });
779 :
780 107 : static AUX_FILE_SIZE: Lazy<IntGaugeVec> = Lazy::new(|| {
781 107 : register_int_gauge_vec!(
782 107 : "pageserver_aux_file_estimated_size",
783 107 : "The size of all aux files for a timeline in aux file v2 store.",
784 107 : &["tenant_id", "shard_id", "timeline_id"]
785 107 : )
786 107 : .expect("failed to define a metric")
787 107 : });
788 :
789 107 : static VALID_LSN_LEASE_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
790 107 : register_uint_gauge_vec!(
791 107 : "pageserver_valid_lsn_lease_count",
792 107 : "The number of valid leases after refreshing gc info.",
793 107 : &["tenant_id", "shard_id", "timeline_id"],
794 107 : )
795 107 : .expect("failed to define a metric")
796 107 : });
797 :
798 0 : pub(crate) static CIRCUIT_BREAKERS_BROKEN: Lazy<IntCounter> = Lazy::new(|| {
799 0 : register_int_counter!(
800 0 : "pageserver_circuit_breaker_broken",
801 0 : "How many times a circuit breaker has broken"
802 0 : )
803 0 : .expect("failed to define a metric")
804 0 : });
805 :
806 0 : pub(crate) static CIRCUIT_BREAKERS_UNBROKEN: Lazy<IntCounter> = Lazy::new(|| {
807 0 : register_int_counter!(
808 0 : "pageserver_circuit_breaker_unbroken",
809 0 : "How many times a circuit breaker has been un-broken (recovered)"
810 0 : )
811 0 : .expect("failed to define a metric")
812 0 : });
813 :
814 103 : pub(crate) static COMPRESSION_IMAGE_INPUT_BYTES: Lazy<IntCounter> = Lazy::new(|| {
815 103 : register_int_counter!(
816 103 : "pageserver_compression_image_in_bytes_total",
817 103 : "Size of data written into image layers before compression"
818 103 : )
819 103 : .expect("failed to define a metric")
820 103 : });
821 :
822 103 : pub(crate) static COMPRESSION_IMAGE_INPUT_BYTES_CONSIDERED: Lazy<IntCounter> = Lazy::new(|| {
823 103 : register_int_counter!(
824 103 : "pageserver_compression_image_in_bytes_considered",
825 103 : "Size of potentially compressible data written into image layers before compression"
826 103 : )
827 103 : .expect("failed to define a metric")
828 103 : });
829 :
830 103 : pub(crate) static COMPRESSION_IMAGE_INPUT_BYTES_CHOSEN: Lazy<IntCounter> = Lazy::new(|| {
831 103 : register_int_counter!(
832 103 : "pageserver_compression_image_in_bytes_chosen",
833 103 : "Size of data whose compressed form was written into image layers"
834 103 : )
835 103 : .expect("failed to define a metric")
836 103 : });
837 :
838 103 : pub(crate) static COMPRESSION_IMAGE_OUTPUT_BYTES: Lazy<IntCounter> = Lazy::new(|| {
839 103 : register_int_counter!(
840 103 : "pageserver_compression_image_out_bytes_total",
841 103 : "Size of compressed image layer written"
842 103 : )
843 103 : .expect("failed to define a metric")
844 103 : });
845 :
846 5 : pub(crate) static RELSIZE_LATEST_CACHE_ENTRIES: Lazy<UIntGauge> = Lazy::new(|| {
847 5 : register_uint_gauge!(
848 5 : "pageserver_relsize_latest_cache_entries",
849 5 : "Number of entries in the latest relation size cache",
850 5 : )
851 5 : .expect("failed to define a metric")
852 5 : });
853 :
854 5 : pub(crate) static RELSIZE_LATEST_CACHE_HITS: Lazy<IntCounter> = Lazy::new(|| {
855 5 : register_int_counter!(
856 5 : "pageserver_relsize_latest_cache_hits",
857 5 : "Latest relation size cache hits",
858 5 : )
859 5 : .expect("failed to define a metric")
860 5 : });
861 :
862 4 : pub(crate) static RELSIZE_LATEST_CACHE_MISSES: Lazy<IntCounter> = Lazy::new(|| {
863 4 : register_int_counter!(
864 4 : "pageserver_relsize_latest_cache_misses",
865 4 : "Latest relation size cache misses",
866 4 : )
867 4 : .expect("failed to define a metric")
868 4 : });
869 :
870 2 : pub(crate) static RELSIZE_SNAPSHOT_CACHE_ENTRIES: Lazy<UIntGauge> = Lazy::new(|| {
871 2 : register_uint_gauge!(
872 2 : "pageserver_relsize_snapshot_cache_entries",
873 2 : "Number of entries in the PITR relation size cache",
874 2 : )
875 2 : .expect("failed to define a metric")
876 2 : });
877 :
878 2 : pub(crate) static RELSIZE_SNAPSHOT_CACHE_HITS: Lazy<IntCounter> = Lazy::new(|| {
879 2 : register_int_counter!(
880 2 : "pageserver_relsize_snapshot_cache_hits",
881 2 : "PITR relation size cache hits",
882 2 : )
883 2 : .expect("failed to define a metric")
884 2 : });
885 :
886 3 : pub(crate) static RELSIZE_SNAPSHOT_CACHE_MISSES: Lazy<IntCounter> = Lazy::new(|| {
887 3 : register_int_counter!(
888 3 : "pageserver_relsize_snapshot_cache_misses",
889 3 : "Relation size snapshot cache misses",
890 3 : )
891 3 : .expect("failed to define a metric")
892 3 : });
893 :
894 2 : pub(crate) static RELSIZE_CACHE_MISSES_OLD: Lazy<IntCounter> = Lazy::new(|| {
895 2 : register_int_counter!(
896 2 : "pageserver_relsize_cache_misses_old",
897 2 : "Relation size cache misses where the lookup LSN is older than the last relation update"
898 2 : )
899 2 : .expect("failed to define a metric")
900 2 : });
901 :
902 : pub(crate) mod initial_logical_size {
903 : use metrics::{IntCounter, IntCounterVec, register_int_counter, register_int_counter_vec};
904 : use once_cell::sync::Lazy;
905 :
906 : pub(crate) struct StartCalculation(IntCounterVec);
907 107 : pub(crate) static START_CALCULATION: Lazy<StartCalculation> = Lazy::new(|| {
908 107 : StartCalculation(
909 107 : register_int_counter_vec!(
910 107 : "pageserver_initial_logical_size_start_calculation",
911 107 : "Incremented each time we start an initial logical size calculation attempt. \
912 107 : The `circumstances` label provides some additional details.",
913 107 : &["attempt", "circumstances"]
914 107 : )
915 107 : .unwrap(),
916 107 : )
917 107 : });
918 :
919 : struct DropCalculation {
920 : first: IntCounter,
921 : retry: IntCounter,
922 : }
923 :
924 107 : static DROP_CALCULATION: Lazy<DropCalculation> = Lazy::new(|| {
925 107 : let vec = register_int_counter_vec!(
926 107 : "pageserver_initial_logical_size_drop_calculation",
927 107 : "Incremented each time we abort a started size calculation attempt.",
928 107 : &["attempt"]
929 107 : )
930 107 : .unwrap();
931 107 : DropCalculation {
932 107 : first: vec.with_label_values(&["first"]),
933 107 : retry: vec.with_label_values(&["retry"]),
934 107 : }
935 107 : });
936 :
937 : pub(crate) struct Calculated {
938 : pub(crate) births: IntCounter,
939 : pub(crate) deaths: IntCounter,
940 : }
941 :
942 107 : pub(crate) static CALCULATED: Lazy<Calculated> = Lazy::new(|| Calculated {
943 107 : births: register_int_counter!(
944 107 : "pageserver_initial_logical_size_finish_calculation",
945 107 : "Incremented every time we finish calculation of initial logical size. \
946 107 : If everything is working well, this should happen at most once per Timeline object."
947 107 : )
948 107 : .unwrap(),
949 107 : deaths: register_int_counter!(
950 107 : "pageserver_initial_logical_size_drop_finished_calculation",
951 107 : "Incremented when we drop a finished initial logical size calculation result. \
952 107 : Mainly useful to turn pageserver_initial_logical_size_finish_calculation into a gauge."
953 107 : )
954 107 : .unwrap(),
955 107 : });
956 :
957 : pub(crate) struct OngoingCalculationGuard {
958 : inc_drop_calculation: Option<IntCounter>,
959 : }
960 :
961 : #[derive(strum_macros::IntoStaticStr)]
962 : pub(crate) enum StartCircumstances {
963 : EmptyInitial,
964 : SkippedConcurrencyLimiter,
965 : AfterBackgroundTasksRateLimit,
966 : }
967 :
968 : impl StartCalculation {
969 113 : pub(crate) fn first(&self, circumstances: StartCircumstances) -> OngoingCalculationGuard {
970 113 : let circumstances_label: &'static str = circumstances.into();
971 113 : self.0
972 113 : .with_label_values(&["first", circumstances_label])
973 113 : .inc();
974 113 : OngoingCalculationGuard {
975 113 : inc_drop_calculation: Some(DROP_CALCULATION.first.clone()),
976 113 : }
977 113 : }
978 0 : pub(crate) fn retry(&self, circumstances: StartCircumstances) -> OngoingCalculationGuard {
979 0 : let circumstances_label: &'static str = circumstances.into();
980 0 : self.0
981 0 : .with_label_values(&["retry", circumstances_label])
982 0 : .inc();
983 0 : OngoingCalculationGuard {
984 0 : inc_drop_calculation: Some(DROP_CALCULATION.retry.clone()),
985 0 : }
986 0 : }
987 : }
988 :
989 : impl Drop for OngoingCalculationGuard {
990 113 : fn drop(&mut self) {
991 113 : if let Some(counter) = self.inc_drop_calculation.take() {
992 0 : counter.inc();
993 113 : }
994 113 : }
995 : }
996 :
997 : impl OngoingCalculationGuard {
998 113 : pub(crate) fn calculation_result_saved(mut self) -> FinishedCalculationGuard {
999 113 : drop(self.inc_drop_calculation.take());
1000 113 : CALCULATED.births.inc();
1001 113 : FinishedCalculationGuard {
1002 113 : inc_on_drop: CALCULATED.deaths.clone(),
1003 113 : }
1004 113 : }
1005 : }
1006 :
1007 : pub(crate) struct FinishedCalculationGuard {
1008 : inc_on_drop: IntCounter,
1009 : }
1010 :
1011 : impl Drop for FinishedCalculationGuard {
1012 3 : fn drop(&mut self) {
1013 3 : self.inc_on_drop.inc();
1014 3 : }
1015 : }
1016 :
1017 : // context: https://github.com/neondatabase/neon/issues/5963
1018 : pub(crate) static TIMELINES_WHERE_WALRECEIVER_GOT_APPROXIMATE_SIZE: Lazy<IntCounter> =
1019 0 : Lazy::new(|| {
1020 0 : register_int_counter!(
1021 0 : "pageserver_initial_logical_size_timelines_where_walreceiver_got_approximate_size",
1022 0 : "Counter for the following event: walreceiver calls \
1023 0 : Timeline::get_current_logical_size() and it returns `Approximate` for the first time."
1024 0 : )
1025 0 : .unwrap()
1026 0 : });
1027 : }
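// Illustrative lifecycle of the guards above: dropping the first guard without
// saving a result counts as a dropped calculation; saving converts it into a
// "birth", and dropping the finished guard later counts a "death".
//
//     let guard = initial_logical_size::START_CALCULATION
//         .first(initial_logical_size::StartCircumstances::SkippedConcurrencyLimiter);
//     // ... compute the size ...
//     let _finished = guard.calculation_result_saved();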
1028 :
1029 0 : static DIRECTORY_ENTRIES_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
1030 0 : register_uint_gauge_vec!(
1031 0 : "pageserver_directory_entries_count",
1032 0 : "Sum of the entries in pageserver-stored directory listings",
1033 0 : &["tenant_id", "shard_id", "timeline_id"]
1034 0 : )
1035 0 : .expect("failed to define a metric")
1036 0 : });
1037 :
1038 108 : pub(crate) static TENANT_STATE_METRIC: Lazy<UIntGaugeVec> = Lazy::new(|| {
1039 108 : register_uint_gauge_vec!(
1040 108 : "pageserver_tenant_states_count",
1041 108 : "Count of tenants per state",
1042 108 : &["state"]
1043 108 : )
1044 108 : .expect("Failed to register pageserver_tenant_states_count metric")
1045 108 : });
1046 :
1047 : /// A set of broken tenants.
1048 : ///
1049 : /// These are expected to be so rare that a set is fine. "Set" as in a new timeseries for each broken
1050 : /// tenant.
1051 5 : pub(crate) static BROKEN_TENANTS_SET: Lazy<UIntGaugeVec> = Lazy::new(|| {
1052 5 : register_uint_gauge_vec!(
1053 5 : "pageserver_broken_tenants_count",
1054 5 : "Set of broken tenants",
1055 5 : &["tenant_id", "shard_id"]
1056 5 : )
1057 5 : .expect("Failed to register pageserver_broken_tenants_count metric")
1058 5 : });
1059 :
1060 3 : pub(crate) static TENANT_SYNTHETIC_SIZE_METRIC: Lazy<UIntGaugeVec> = Lazy::new(|| {
1061 3 : register_uint_gauge_vec!(
1062 3 : "pageserver_tenant_synthetic_cached_size_bytes",
1063 3 : "Synthetic size of each tenant in bytes",
1064 3 : &["tenant_id"]
1065 3 : )
1066 3 : .expect("Failed to register pageserver_tenant_synthetic_cached_size_bytes metric")
1067 3 : });
1068 :
1069 108 : pub(crate) static TENANT_OFFLOADED_TIMELINES: Lazy<UIntGaugeVec> = Lazy::new(|| {
1070 108 : register_uint_gauge_vec!(
1071 108 : "pageserver_tenant_offloaded_timelines",
1072 108 : "Number of offloaded timelines of a tenant",
1073 108 : &["tenant_id", "shard_id"]
1074 108 : )
1075 108 : .expect("Failed to register pageserver_tenant_offloaded_timelines metric")
1076 108 : });
1077 :
1078 0 : pub(crate) static EVICTION_ITERATION_DURATION: Lazy<HistogramVec> = Lazy::new(|| {
1079 0 : register_histogram_vec!(
1080 0 : "pageserver_eviction_iteration_duration_seconds_global",
1081 0 : "Time spent on a single eviction iteration",
1082 0 : &["period_secs", "threshold_secs"],
1083 0 : STORAGE_OP_BUCKETS.into(),
1084 0 : )
1085 0 : .expect("failed to define a metric")
1086 0 : });
1087 :
1088 107 : static EVICTIONS: Lazy<IntCounterVec> = Lazy::new(|| {
1089 107 : register_int_counter_vec!(
1090 107 : "pageserver_evictions",
1091 107 : "Number of layers evicted from the pageserver",
1092 107 : &["tenant_id", "shard_id", "timeline_id"]
1093 107 : )
1094 107 : .expect("failed to define a metric")
1095 107 : });
1096 :
1097 107 : static EVICTIONS_WITH_LOW_RESIDENCE_DURATION: Lazy<IntCounterVec> = Lazy::new(|| {
1098 107 : register_int_counter_vec!(
1099 107 : "pageserver_evictions_with_low_residence_duration",
1100 107 : "If a layer is evicted that was resident for less than `low_threshold`, it is counted to this counter. \
1101 107 : Residence duration is determined using the `residence_duration_data_source`.",
1102 107 : &["tenant_id", "shard_id", "timeline_id", "residence_duration_data_source", "low_threshold_secs"]
1103 107 : )
1104 107 : .expect("failed to define a metric")
1105 107 : });
1106 :
1107 0 : pub(crate) static UNEXPECTED_ONDEMAND_DOWNLOADS: Lazy<IntCounter> = Lazy::new(|| {
1108 0 : register_int_counter!(
1109 0 : "pageserver_unexpected_ondemand_downloads_count",
1110 0 : "Number of unexpected on-demand downloads. \
1111 0 : We log more context for each increment, so we forgo any labels in this metric.",
1112 0 : )
1113 0 : .expect("failed to define a metric")
1114 0 : });
1115 :
1116 : /// How long did we take to start up? Broken down by labels to describe
1117 : /// different phases of startup.
1118 0 : pub static STARTUP_DURATION: Lazy<GaugeVec> = Lazy::new(|| {
1119 0 : register_gauge_vec!(
1120 0 : "pageserver_startup_duration_seconds",
1121 0 : "Time taken by phases of pageserver startup, in seconds",
1122 0 : &["phase"]
1123 0 : )
1124 0 : .expect("Failed to register pageserver_startup_duration_seconds metric")
1125 0 : });
1126 :
1127 0 : pub static STARTUP_IS_LOADING: Lazy<UIntGauge> = Lazy::new(|| {
1128 0 : register_uint_gauge!(
1129 0 : "pageserver_startup_is_loading",
1130 0 : "1 while in initial startup load of tenants, 0 at other times"
1131 0 : )
1132 0 : .expect("Failed to register pageserver_startup_is_loading")
1133 0 : });
1134 :
1135 105 : pub(crate) static TIMELINE_EPHEMERAL_BYTES: Lazy<UIntGauge> = Lazy::new(|| {
1136 105 : register_uint_gauge!(
1137 105 : "pageserver_timeline_ephemeral_bytes",
1138 105 : "Total number of bytes in ephemeral layers, summed for all timelines. Approximate, lazily updated."
1139 105 : )
1140 105 : .expect("Failed to register metric")
1141 105 : });
1142 :
1143 : /// Metrics related to the lifecycle of a [`crate::tenant::TenantShard`] object: things
1144 : /// like how long it took to load.
1145 : ///
1146 : /// Note that these are process-global metrics, _not_ per-tenant metrics. Per-tenant
1147 : /// metrics are rather expensive, and usually fine grained stuff makes more sense
1148 : /// at a timeline level than tenant level.
1149 : pub(crate) struct TenantMetrics {
1150 : /// How long did tenants take to go from construction to active state?
1151 : pub(crate) activation: Histogram,
1152 : pub(crate) preload: Histogram,
1153 : pub(crate) attach: Histogram,
1154 :
1155 : /// How many tenants are included in the initial startup of the pageserver?
1156 : pub(crate) startup_scheduled: IntCounter,
1157 : pub(crate) startup_complete: IntCounter,
1158 : }
1159 :
1160 0 : pub(crate) static TENANT: Lazy<TenantMetrics> = Lazy::new(|| {
1161 0 : TenantMetrics {
1162 0 : activation: register_histogram!(
1163 0 : "pageserver_tenant_activation_seconds",
1164 0 : "Time taken by tenants to activate, in seconds",
1165 0 : CRITICAL_OP_BUCKETS.into()
1166 0 : )
1167 0 : .expect("Failed to register metric"),
1168 0 : preload: register_histogram!(
1169 0 : "pageserver_tenant_preload_seconds",
1170 0 : "Time taken by tenants to load remote metadata on startup/attach, in seconds",
1171 0 : CRITICAL_OP_BUCKETS.into()
1172 0 : )
1173 0 : .expect("Failed to register metric"),
1174 0 : attach: register_histogram!(
1175 0 : "pageserver_tenant_attach_seconds",
1176 0 : "Time taken by tenants to initialize, after remote metadata is already loaded",
1177 0 : CRITICAL_OP_BUCKETS.into()
1178 0 : )
1179 0 : .expect("Failed to register metric"),
1180 0 : startup_scheduled: register_int_counter!(
1181 0 : "pageserver_tenant_startup_scheduled",
1182 0 : "Number of tenants included in pageserver startup (doesn't count tenants attached later)"
1183 0 : ).expect("Failed to register metric"),
1184 0 : startup_complete: register_int_counter!(
1185 0 : "pageserver_tenant_startup_complete",
1186 0 : "Number of tenants that have completed warm-up, or activated on-demand during initial startup: \
1187 0 : should eventually reach `pageserver_tenant_startup_scheduled_total`. Does not include broken \
1188 0 : tenants: such cases will lead to this metric never reaching the scheduled count."
1189 0 : ).expect("Failed to register metric"),
1190 0 : }
1191 0 : });
1192 :
1193 : /// Each `Timeline`'s [`EVICTIONS_WITH_LOW_RESIDENCE_DURATION`] metric.
1194 : #[derive(Debug)]
1195 : pub(crate) struct EvictionsWithLowResidenceDuration {
1196 : data_source: &'static str,
1197 : threshold: Duration,
1198 : counter: Option<IntCounter>,
1199 : }
1200 :
1201 : pub(crate) struct EvictionsWithLowResidenceDurationBuilder {
1202 : data_source: &'static str,
1203 : threshold: Duration,
1204 : }
1205 :
1206 : impl EvictionsWithLowResidenceDurationBuilder {
1207 233 : pub fn new(data_source: &'static str, threshold: Duration) -> Self {
1208 233 : Self {
1209 233 : data_source,
1210 233 : threshold,
1211 233 : }
1212 233 : }
1213 :
1214 233 : fn build(
1215 233 : &self,
1216 233 : tenant_id: &str,
1217 233 : shard_id: &str,
1218 233 : timeline_id: &str,
1219 233 : ) -> EvictionsWithLowResidenceDuration {
1220 233 : let counter = EVICTIONS_WITH_LOW_RESIDENCE_DURATION
1221 233 : .get_metric_with_label_values(&[
1222 233 : tenant_id,
1223 233 : shard_id,
1224 233 : timeline_id,
1225 233 : self.data_source,
1226 233 : &EvictionsWithLowResidenceDuration::threshold_label_value(self.threshold),
1227 233 : ])
1228 233 : .unwrap();
1229 233 : EvictionsWithLowResidenceDuration {
1230 233 : data_source: self.data_source,
1231 233 : threshold: self.threshold,
1232 233 : counter: Some(counter),
1233 233 : }
1234 233 : }
1235 : }
1236 :
1237 : impl EvictionsWithLowResidenceDuration {
1238 238 : fn threshold_label_value(threshold: Duration) -> String {
1239 238 : format!("{}", threshold.as_secs())
1240 238 : }
1241 :
1242 2 : pub fn observe(&self, observed_value: Duration) {
1243 2 : if observed_value < self.threshold {
1244 2 : self.counter
1245 2 : .as_ref()
1246 2 : .expect("nobody calls this function after `remove_from_vec`")
1247 2 : .inc();
1248 2 : }
1249 2 : }
1250 :
1251 0 : pub fn change_threshold(
1252 0 : &mut self,
1253 0 : tenant_id: &str,
1254 0 : shard_id: &str,
1255 0 : timeline_id: &str,
1256 0 : new_threshold: Duration,
1257 0 : ) {
1258 0 : if new_threshold == self.threshold {
1259 0 : return;
1260 0 : }
1261 0 : let mut with_new = EvictionsWithLowResidenceDurationBuilder::new(
1262 0 : self.data_source,
1263 0 : new_threshold,
1264 0 : )
1265 0 : .build(tenant_id, shard_id, timeline_id);
1266 0 : std::mem::swap(self, &mut with_new);
1267 0 : with_new.remove(tenant_id, shard_id, timeline_id);
1268 0 : }
1269 :
1270 : // This could be a `Drop` impl, but, we need the `tenant_id` and `timeline_id`.
1271 5 : fn remove(&mut self, tenant_id: &str, shard_id: &str, timeline_id: &str) {
1272 5 : let Some(_counter) = self.counter.take() else {
1273 0 : return;
1274 : };
1275 :
1276 5 : let threshold = Self::threshold_label_value(self.threshold);
1277 5 :
1278 5 : let removed = EVICTIONS_WITH_LOW_RESIDENCE_DURATION.remove_label_values(&[
1279 5 : tenant_id,
1280 5 : shard_id,
1281 5 : timeline_id,
1282 5 : self.data_source,
1283 5 : &threshold,
1284 5 : ]);
1285 5 :
1286 5 : match removed {
1287 0 : Err(e) => {
1288 0 : // this has been hit in staging as
1289 0 : // <https://neondatabase.sentry.io/issues/4142396994/>, but we don't know how.
1290 0 : // because we can be in the drop path already, don't risk:
1291 0 : // - "double-panic => illegal instruction" or
1292 0 : // - future "drop panic => abort"
1293 0 : //
1294 0 : // so just nag: (the error has the labels)
1295 0 : tracing::warn!(
1296 0 : "failed to remove EvictionsWithLowResidenceDuration, it was already removed? {e:#?}"
1297 : );
1298 : }
1299 : Ok(()) => {
1300 : // to help identify cases where we double-remove the same values, let's log all
1301 : // deletions?
1302 5 : tracing::info!(
1303 0 : "removed EvictionsWithLowResidenceDuration with {tenant_id}, {timeline_id}, {}, {threshold}",
1304 : self.data_source
1305 : );
1306 : }
1307 : }
1308 5 : }
1309 : }
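// Lifecycle sketch (assumed string-ified ids; the data source name here is
// hypothetical): build once per timeline, observe on each eviction (only
// durations below the threshold increment), and remove the label set on shutdown.
//
//     let builder = EvictionsWithLowResidenceDurationBuilder::new(
//         "example_data_source", // hypothetical data source label
//         Duration::from_secs(600),
//     );
//     let mut metric = builder.build(tenant_id, shard_id, timeline_id);
//     metric.observe(Duration::from_secs(5)); // below threshold: increments
//     metric.remove(tenant_id, shard_id, timeline_id);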
1310 :
1311 : // Metrics collected on disk IO operations
1312 : //
1313 : // Roughly logarithmic scale.
1314 : const STORAGE_IO_TIME_BUCKETS: &[f64] = &[
1315 : 0.000030, // 30 usec
1316 : 0.001000, // 1000 usec
1317 : 0.030, // 30 ms
1318 : 1.000, // 1000 ms
1319 : 30.000, // 30000 ms
1320 : ];
1321 :
1322 : /// VirtualFile fs operation variants.
1323 : ///
1324 : /// Operations:
1325 : /// - open ([`std::fs::OpenOptions::open`])
1326 : /// - close (dropping [`crate::virtual_file::VirtualFile`])
1327 : /// - close-by-replace (close by replacement algorithm)
1328 : /// - read (`read_at`)
1329 : /// - write (`write_at`)
1330 : /// - seek (modify internal position or file length query)
1331 : /// - fsync ([`std::fs::File::sync_all`])
1332 : /// - metadata ([`std::fs::File::metadata`])
1333 : #[derive(
1334 0 : Debug, Clone, Copy, strum_macros::EnumCount, strum_macros::EnumIter, strum_macros::FromRepr,
1335 : )]
1336 : pub(crate) enum StorageIoOperation {
1337 : Open,
1338 : OpenAfterReplace,
1339 : Close,
1340 : CloseByReplace,
1341 : Read,
1342 : Write,
1343 : Seek,
1344 : Fsync,
1345 : Metadata,
1346 : SetLen,
1347 : }
1348 :
1349 : impl StorageIoOperation {
1350 1200 : pub fn as_str(&self) -> &'static str {
1351 1200 : match self {
1352 120 : StorageIoOperation::Open => "open",
1353 120 : StorageIoOperation::OpenAfterReplace => "open-after-replace",
1354 120 : StorageIoOperation::Close => "close",
1355 120 : StorageIoOperation::CloseByReplace => "close-by-replace",
1356 120 : StorageIoOperation::Read => "read",
1357 120 : StorageIoOperation::Write => "write",
1358 120 : StorageIoOperation::Seek => "seek",
1359 120 : StorageIoOperation::Fsync => "fsync",
1360 120 : StorageIoOperation::Metadata => "metadata",
1361 120 : StorageIoOperation::SetLen => "set_len",
1362 : }
1363 1200 : }
1364 : }
1365 :
1366 : /// Tracks time taken by fs operations near VirtualFile.
1367 : #[derive(Debug)]
1368 : pub(crate) struct StorageIoTime {
1369 : metrics: [Histogram; StorageIoOperation::COUNT],
1370 : }
1371 :
1372 : impl StorageIoTime {
1373 120 : fn new() -> Self {
1374 120 : let storage_io_histogram_vec = register_histogram_vec!(
1375 120 : "pageserver_io_operations_seconds",
1376 120 : "Time spent in IO operations",
1377 120 : &["operation"],
1378 120 : STORAGE_IO_TIME_BUCKETS.into()
1379 120 : )
1380 120 : .expect("failed to define a metric");
1381 1200 : let metrics = std::array::from_fn(|i| {
1382 1200 : let op = StorageIoOperation::from_repr(i).unwrap();
1383 1200 : storage_io_histogram_vec
1384 1200 : .get_metric_with_label_values(&[op.as_str()])
1385 1200 : .unwrap()
1386 1200 : });
1387 120 : Self { metrics }
1388 120 : }
1389 :
1390 492669 : pub(crate) fn get(&self, op: StorageIoOperation) -> &Histogram {
1391 492669 : &self.metrics[op as usize]
1392 492669 : }
1393 : }
1394 :
1395 : pub(crate) static STORAGE_IO_TIME_METRIC: Lazy<StorageIoTime> = Lazy::new(StorageIoTime::new);
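// Usage sketch (assumed binding `start: Instant`): callers look the histogram up
// by operation and observe the elapsed wall time.
//
//     STORAGE_IO_TIME_METRIC
//         .get(StorageIoOperation::Read)
//         .observe(start.elapsed().as_secs_f64());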
1396 :
1397 : #[derive(Clone, Copy)]
1398 : #[repr(usize)]
1399 : pub(crate) enum StorageIoSizeOperation {
1400 : Read,
1401 : Write,
1402 : }
1403 :
1404 : impl StorageIoSizeOperation {
1405 : pub(crate) const VARIANTS: &'static [&'static str] = &["read", "write"];
1406 :
1407 748 : fn as_str(&self) -> &'static str {
1408 748 : Self::VARIANTS[*self as usize]
1409 748 : }
1410 : }
1411 :
1412 : // Needed for the https://neonprod.grafana.net/d/5uK9tHL4k/picking-tenant-for-relocation?orgId=1
1413 141 : pub(crate) static STORAGE_IO_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
1414 141 : register_uint_gauge_vec!(
1415 141 : "pageserver_io_operations_bytes_total",
1416 141 : "Total amount of bytes read/written in IO operations",
1417 141 : &["operation", "tenant_id", "shard_id", "timeline_id"]
1418 141 : )
1419 141 : .expect("failed to define a metric")
1420 141 : });
1421 :
1422 : #[derive(Clone, Debug)]
1423 : pub(crate) struct StorageIoSizeMetrics {
1424 : pub read: UIntGauge,
1425 : pub write: UIntGauge,
1426 : }
1427 :
1428 : impl StorageIoSizeMetrics {
1429 374 : pub(crate) fn new(tenant_id: &str, shard_id: &str, timeline_id: &str) -> Self {
1430 374 : let read = STORAGE_IO_SIZE
1431 374 : .get_metric_with_label_values(&[
1432 374 : StorageIoSizeOperation::Read.as_str(),
1433 374 : tenant_id,
1434 374 : shard_id,
1435 374 : timeline_id,
1436 374 : ])
1437 374 : .unwrap();
1438 374 : let write = STORAGE_IO_SIZE
1439 374 : .get_metric_with_label_values(&[
1440 374 : StorageIoSizeOperation::Write.as_str(),
1441 374 : tenant_id,
1442 374 : shard_id,
1443 374 : timeline_id,
1444 374 : ])
1445 374 : .unwrap();
1446 374 : Self { read, write }
1447 374 : }
1448 : }
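// Usage sketch (assumed bindings): the read/write gauges are resolved once per
// timeline and then bumped by byte counts on each IO.
//
//     let io_size = StorageIoSizeMetrics::new(tenant_id, shard_id, timeline_id);
//     io_size.read.add(bytes_read as u64);
//     io_size.write.add(bytes_written as u64);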
1449 :
1450 : #[cfg(not(test))]
1451 : pub(crate) mod virtual_file_descriptor_cache {
1452 : use super::*;
1453 :
1454 0 : pub(crate) static SIZE_MAX: Lazy<UIntGauge> = Lazy::new(|| {
1455 0 : register_uint_gauge!(
1456 0 : "pageserver_virtual_file_descriptor_cache_size_max",
1457 0 : "Maximum number of open file descriptors in the cache."
1458 0 : )
1459 0 : .unwrap()
1460 0 : });
1461 :
1462 : // SIZE_CURRENT: derive it like so:
1463 : // ```
1464 : // sum(pageserver_io_operations_seconds_count{operation=~"^(open|open-after-replace)$"})
1465 : // - ignoring(operation)
1466 : // sum(pageserver_io_operations_seconds_count{operation=~"^(close|close-by-replace)$"})
1467 : // ```
1468 : }
1469 :
1470 : #[cfg(not(test))]
1471 : pub(crate) mod virtual_file_io_engine {
1472 : use super::*;
1473 :
1474 0 : pub(crate) static KIND: Lazy<UIntGaugeVec> = Lazy::new(|| {
1475 0 : register_uint_gauge_vec!(
1476 0 : "pageserver_virtual_file_io_engine_kind",
1477 0 : "The configured io engine for VirtualFile",
1478 0 : &["kind"],
1479 0 : )
1480 0 : .unwrap()
1481 0 : });
1482 : }
1483 :
1484 : pub(crate) struct SmgrOpTimer(Option<SmgrOpTimerInner>);
1485 : pub(crate) struct SmgrOpTimerInner {
1486 : global_execution_latency_histo: Histogram,
1487 : per_timeline_execution_latency_histo: Option<Histogram>,
1488 :
1489 : global_batch_wait_time: Histogram,
1490 : per_timeline_batch_wait_time: Histogram,
1491 :
1492 : global_flush_in_progress_micros: IntCounter,
1493 : per_timeline_flush_in_progress_micros: IntCounter,
1494 :
1495 : throttling: Arc<tenant_throttling::Pagestream>,
1496 :
1497 : timings: SmgrOpTimerState,
1498 : }
1499 :
1500 : /// The stages of request processing are represented by the enum variants.
1501 : /// Used as part of [`SmgrOpTimerInner::timings`].
1502 : ///
1503 : /// Request processing calls into the `SmgrOpTimer::observe_*` methods at the
1504 : /// transition points.
1505 : /// These methods bump relevant counters and then update [`SmgrOpTimerInner::timings`]
1506 : /// to the next state.
1507 : ///
1508 : /// Each request goes through every stage, in all configurations.
1509 : ///
1510 : #[derive(Debug)]
1511 : enum SmgrOpTimerState {
1512 : Received {
1513 : // In the future, we may want to track the full time the request spent
1514 : // inside pageserver process (time spent in kernel buffers can't be tracked).
1515 : // `received_at` would be used for that.
1516 : #[allow(dead_code)]
1517 : received_at: Instant,
1518 : },
1519 : Throttling {
1520 : throttle_started_at: Instant,
1521 : },
1522 : Batching {
1523 : throttle_done_at: Instant,
1524 : },
1525 : Executing {
1526 : execution_started_at: Instant,
1527 : },
1528 : Flushing,
1529 : // NB: when adding observation points, remember to update the Drop impl.
1530 : }
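     :
     : // Sketch of the intended call order (hypothetical handler code; the real call
     : // sites live in page_service request handling):
     : //
     : //   let mut timer = metrics.start_smgr_op(SmgrQueryType::GetPageAtLsn, Instant::now());
     : //   timer.observe_throttle_start(Instant::now());
     : //   timer.observe_throttle_done(ThrottleResult::NotThrottled { end: Instant::now() });
     : //   timer.observe_execution_start(Instant::now());
     : //   let flush = timer.observe_execution_end(Instant::now()); // Some(_) only for the first call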
1531 :
1532 : // NB: when adding observation points, remember to update the Drop impl.
1533 : impl SmgrOpTimer {
1534 : /// See [`SmgrOpTimerState`] for more context.
1535 0 : pub(crate) fn observe_throttle_start(&mut self, at: Instant) {
1536 0 : let Some(inner) = self.0.as_mut() else {
1537 0 : return;
1538 : };
1539 0 : let SmgrOpTimerState::Received { received_at: _ } = &mut inner.timings else {
1540 0 : return;
1541 : };
1542 0 : inner.throttling.count_accounted_start.inc();
1543 0 : inner.timings = SmgrOpTimerState::Throttling {
1544 0 : throttle_started_at: at,
1545 0 : };
1546 0 : }
1547 :
1548 : /// See [`SmgrOpTimerState`] for more context.
1549 0 : pub(crate) fn observe_throttle_done(&mut self, throttle: ThrottleResult) {
1550 0 : let Some(inner) = self.0.as_mut() else {
1551 0 : return;
1552 : };
1553 : let SmgrOpTimerState::Throttling {
1554 0 : throttle_started_at,
1555 0 : } = &inner.timings
1556 : else {
1557 0 : return;
1558 : };
1559 0 : inner.throttling.count_accounted_finish.inc();
1560 0 : match throttle {
1561 0 : ThrottleResult::NotThrottled { end } => {
1562 0 : inner.timings = SmgrOpTimerState::Batching {
1563 0 : throttle_done_at: end,
1564 0 : };
1565 0 : }
1566 0 : ThrottleResult::Throttled { end } => {
1567 0 : // update metrics
1568 0 : inner.throttling.count_throttled.inc();
1569 0 : inner
1570 0 : .throttling
1571 0 : .wait_time
1572 0 : .inc_by((end - *throttle_started_at).as_micros().try_into().unwrap());
1573 0 : // state transition
1574 0 : inner.timings = SmgrOpTimerState::Batching {
1575 0 : throttle_done_at: end,
1576 0 : };
1577 0 : }
1578 : }
1579 0 : }
1580 :
1581 : /// See [`SmgrOpTimerState`] for more context.
1582 0 : pub(crate) fn observe_execution_start(&mut self, at: Instant) {
1583 0 : let Some(inner) = self.0.as_mut() else {
1584 0 : return;
1585 : };
1586 0 : let SmgrOpTimerState::Batching { throttle_done_at } = &inner.timings else {
1587 0 : return;
1588 : };
1589 : // update metrics
1590 0 : let batch = at - *throttle_done_at;
1591 0 : inner.global_batch_wait_time.observe(batch.as_secs_f64());
1592 0 : inner
1593 0 : .per_timeline_batch_wait_time
1594 0 : .observe(batch.as_secs_f64());
1595 0 : // state transition
1596 0 : inner.timings = SmgrOpTimerState::Executing {
1597 0 : execution_started_at: at,
1598 0 : }
1599 0 : }
1600 :
1601 : /// For all but the first caller, this is a no-op.
1602 : /// The first caller receives Some, subsequent ones None.
1603 : ///
1604 : /// See [`SmgrOpTimerState`] for more context.
1605 0 : pub(crate) fn observe_execution_end(&mut self, at: Instant) -> Option<SmgrOpFlushInProgress> {
1606 : // NB: unlike the other observe_* methods, this one take()s.
1607 : #[allow(clippy::question_mark)] // maintain similar code pattern.
1608 0 : let Some(mut inner) = self.0.take() else {
1609 0 : return None;
1610 : };
1611 : let SmgrOpTimerState::Executing {
1612 0 : execution_started_at,
1613 0 : } = &inner.timings
1614 : else {
1615 0 : return None;
1616 : };
1617 : // update metrics
1618 0 : let execution = at - *execution_started_at;
1619 0 : inner
1620 0 : .global_execution_latency_histo
1621 0 : .observe(execution.as_secs_f64());
1622 0 : if let Some(per_timeline_execution_latency_histo) =
1623 0 : &inner.per_timeline_execution_latency_histo
1624 0 : {
1625 0 : per_timeline_execution_latency_histo.observe(execution.as_secs_f64());
1626 0 : }
1627 :
1628 : // state transition
1629 0 : inner.timings = SmgrOpTimerState::Flushing;
1630 0 :
1631 0 : // return the flush in progress object which
1632 0 : // will do the remaining metrics updates
1633 0 : let SmgrOpTimerInner {
1634 0 : global_flush_in_progress_micros,
1635 0 : per_timeline_flush_in_progress_micros,
1636 0 : ..
1637 0 : } = inner;
1638 0 : Some(SmgrOpFlushInProgress {
1639 0 : global_micros: global_flush_in_progress_micros,
1640 0 : per_timeline_micros: per_timeline_flush_in_progress_micros,
1641 0 : })
1642 0 : }
1643 : }
1644 :
1645 : /// The last stage of request processing is serializing and flushing the request
1646 : /// into the TCP connection. We want to make slow flushes observable
1647 : /// _while they are occurring_, so this struct provides a wrapper method [`Self::measure`]
1648 : /// to periodically bump the metric.
1649 : ///
1650 : /// If in the future we decide that we're not interested in live updates, we can
1651 : /// add another `observe_*` method to [`SmgrOpTimer`], follow the existing pattern there,
1652 : /// and remove this struct from the code base.
1653 : pub(crate) struct SmgrOpFlushInProgress {
1654 : global_micros: IntCounter,
1655 : per_timeline_micros: IntCounter,
1656 : }
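     :
     : // Illustrative use of the wrapper (hypothetical; `flush_fut` and `socket_fd`
     : // stand in for the real flush future and the connection's raw fd):
     : //
     : //   if let Some(flush) = timer.observe_execution_end(Instant::now()) {
     : //       flush.measure(Instant::now(), flush_fut, socket_fd).await;
     : //   }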
1657 :
1658 : impl Drop for SmgrOpTimer {
1659 0 : fn drop(&mut self) {
1660 0 : // In case of early drop, update any of the remaining metrics with
1661 0 : // observations so that (started,finished) counter pairs balance out
1662 0 : // and all counters on the latency path have the same number of
1663 0 : // observations.
1664 0 : // It's technically lying and it would be better if each metric had
1665 0 : // a separate label or similar for cancelled requests.
1666 0 : // But we don't have that right now and counter pairs balancing
1667 0 : // out is useful when using the metrics in panels and whatnot.
1668 0 : let now = Instant::now();
1669 0 : self.observe_throttle_start(now);
1670 0 : self.observe_throttle_done(ThrottleResult::NotThrottled { end: now });
1671 0 : self.observe_execution_start(now);
1672 0 : let maybe_flush_timer = self.observe_execution_end(now);
1673 0 : drop(maybe_flush_timer);
1674 0 : }
1675 : }
1676 :
1677 : impl SmgrOpFlushInProgress {
1678 : /// The caller must guarantee that `socket_fd` outlives this function.
1679 0 : pub(crate) async fn measure<Fut, O>(
1680 0 : self,
1681 0 : started_at: Instant,
1682 0 : mut fut: Fut,
1683 0 : socket_fd: RawFd,
1684 0 : ) -> O
1685 0 : where
1686 0 : Fut: std::future::Future<Output = O>,
1687 0 : {
1688 0 : let mut fut = std::pin::pin!(fut);
1689 0 :
1690 0 : let mut logged = false;
1691 0 : let mut last_counter_increment_at = started_at;
1692 0 : let mut observe_guard = scopeguard::guard(
1693 0 : |is_timeout| {
1694 0 : let now = Instant::now();
1695 0 :
1696 0 : // Increment counter
1697 0 : {
1698 0 : let elapsed_since_last_observe = now - last_counter_increment_at;
1699 0 : self.global_micros
1700 0 : .inc_by(u64::try_from(elapsed_since_last_observe.as_micros()).unwrap());
1701 0 : self.per_timeline_micros
1702 0 : .inc_by(u64::try_from(elapsed_since_last_observe.as_micros()).unwrap());
1703 0 : last_counter_increment_at = now;
1704 0 : }
1705 0 :
1706 0 : // Log something on every timeout, and on completion but only if we hit a timeout.
1707 0 : if is_timeout || logged {
1708 0 : logged = true;
1709 0 : let elapsed_total = now - started_at;
1710 0 : let msg = if is_timeout {
1711 0 : "slow flush ongoing"
1712 : } else {
1713 0 : "slow flush completed or cancelled"
1714 : };
1715 :
1716 0 : let (inq, outq) = {
1717 0 : // SAFETY: caller guarantees that `socket_fd` outlives this function.
1718 0 : #[cfg(target_os = "linux")]
1719 0 : unsafe {
1720 0 : (
1721 0 : utils::linux_socket_ioctl::inq(socket_fd).unwrap_or(-2),
1722 0 : utils::linux_socket_ioctl::outq(socket_fd).unwrap_or(-2),
1723 0 : )
1724 0 : }
1725 0 : #[cfg(not(target_os = "linux"))]
1726 0 : {
1727 0 : _ = socket_fd; // appease unused lint on macOS
1728 0 : (-1, -1)
1729 0 : }
1730 0 : };
1731 0 :
1732 0 : let elapsed_total_secs = format!("{:.6}", elapsed_total.as_secs_f64());
1733 0 : tracing::info!(elapsed_total_secs, inq, outq, msg);
1734 0 : }
1735 0 : },
1736 0 : |mut observe| {
1737 0 : observe(false);
1738 0 : },
1739 0 : );
1740 :
1741 : loop {
1742 0 : match tokio::time::timeout(Duration::from_secs(10), &mut fut).await {
1743 0 : Ok(v) => return v,
1744 0 : Err(_timeout) => {
1745 0 : (*observe_guard)(true);
1746 0 : }
1747 : }
1748 : }
1749 0 : }
1750 : }
1751 :
1752 : #[derive(
1753 : Debug,
1754 : Clone,
1755 : Copy,
1756 : IntoStaticStr,
1757 : strum_macros::EnumCount,
1758 0 : strum_macros::EnumIter,
1759 : strum_macros::FromRepr,
1760 : enum_map::Enum,
1761 : )]
1762 : #[strum(serialize_all = "snake_case")]
1763 : pub enum SmgrQueryType {
1764 : GetRelExists,
1765 : GetRelSize,
1766 : GetPageAtLsn,
1767 : GetDbSize,
1768 : GetSlruSegment,
1769 : #[cfg(feature = "testing")]
1770 : Test,
1771 : }
1772 :
1773 : #[derive(
1774 : Debug,
1775 : Clone,
1776 : Copy,
1777 : IntoStaticStr,
1778 : strum_macros::EnumCount,
1779 45 : strum_macros::EnumIter,
1780 : strum_macros::FromRepr,
1781 : enum_map::Enum,
1782 : )]
1783 : #[strum(serialize_all = "snake_case")]
1784 : pub enum GetPageBatchBreakReason {
1785 : BatchFull,
1786 : NonBatchableRequest,
1787 : NonUniformLsn,
1788 : SamePageAtDifferentLsn,
1789 : NonUniformTimeline,
1790 : ExecutorSteal,
1791 : #[cfg(feature = "testing")]
1792 : NonUniformKey,
1793 : }
1794 :
1795 : pub(crate) struct SmgrQueryTimePerTimeline {
1796 : global_started: [IntCounter; SmgrQueryType::COUNT],
1797 : global_latency: [Histogram; SmgrQueryType::COUNT],
1798 : per_timeline_getpage_started: IntCounter,
1799 : per_timeline_getpage_latency: Histogram,
1800 : global_batch_size: Histogram,
1801 : per_timeline_batch_size: Histogram,
1802 : global_flush_in_progress_micros: IntCounter,
1803 : per_timeline_flush_in_progress_micros: IntCounter,
1804 : global_batch_wait_time: Histogram,
1805 : per_timeline_batch_wait_time: Histogram,
1806 : global_batch_break_reason: [IntCounter; GetPageBatchBreakReason::COUNT],
1807 : per_timeline_batch_break_reason: GetPageBatchBreakReasonTimelineMetrics,
1808 : throttling: Arc<tenant_throttling::Pagestream>,
1809 : }
1810 :
1811 107 : static SMGR_QUERY_STARTED_GLOBAL: Lazy<IntCounterVec> = Lazy::new(|| {
1812 107 : register_int_counter_vec!(
1813 107 : // it's a counter, but the name is prepared for extension into a histogram of queue depth
1814 107 : "pageserver_smgr_query_started_global_count",
1815 107 : "Number of smgr queries started, aggregated by query type.",
1816 107 : &["smgr_query_type"],
1817 107 : )
1818 107 : .expect("failed to define a metric")
1819 107 : });
1820 :
1821 107 : static SMGR_QUERY_STARTED_PER_TENANT_TIMELINE: Lazy<IntCounterVec> = Lazy::new(|| {
1822 107 : register_int_counter_vec!(
1823 107 : // it's a counter, but the name is prepared for extension into a histogram of queue depth
1824 107 : "pageserver_smgr_query_started_count",
1825 107 : "Number of smgr queries started, aggregated by query type and tenant/timeline.",
1826 107 : &["smgr_query_type", "tenant_id", "shard_id", "timeline_id"],
1827 107 : )
1828 107 : .expect("failed to define a metric")
1829 107 : });
1830 :
1831 : /// Per-timeline smgr histogram buckets should be the same as the compute buckets, such that the
1832 : /// metrics are comparable across compute and Pageserver. See also:
1833 : /// <https://github.com/neondatabase/neon/blob/1a87975d956a8ad17ec8b85da32a137ec4893fcc/pgxn/neon/neon_perf_counters.h#L18-L27>
1834 : /// <https://github.com/neondatabase/flux-fleet/blob/556182a939edda87ff1d85a6b02e5cec901e0e9e/apps/base/compute-metrics/scrape-compute-sql-exporter.yaml#L29-L35>
1835 : static SMGR_QUERY_TIME_PER_TENANT_TIMELINE_BUCKETS: &[f64] =
1836 : &[0.0006, 0.001, 0.003, 0.006, 0.01, 0.03, 0.1, 1.0, 3.0];
1837 :
1838 107 : static SMGR_QUERY_TIME_PER_TENANT_TIMELINE: Lazy<HistogramVec> = Lazy::new(|| {
1839 107 : register_histogram_vec!(
1840 107 : "pageserver_smgr_query_seconds",
1841 107 : "Time spent _executing_ smgr query handling, excluding batch and throttle delays.",
1842 107 : &["smgr_query_type", "tenant_id", "shard_id", "timeline_id"],
1843 107 : SMGR_QUERY_TIME_PER_TENANT_TIMELINE_BUCKETS.into(),
1844 107 : )
1845 107 : .expect("failed to define a metric")
1846 107 : });
1847 :
1848 107 : static SMGR_QUERY_TIME_GLOBAL_BUCKETS: Lazy<Vec<f64>> = Lazy::new(|| {
1849 107 : [
1850 107 : 1,
1851 107 : 10,
1852 107 : 20,
1853 107 : 40,
1854 107 : 60,
1855 107 : 80,
1856 107 : 100,
1857 107 : 200,
1858 107 : 300,
1859 107 : 400,
1860 107 : 500,
1861 107 : 600,
1862 107 : 700,
1863 107 : 800,
1864 107 : 900,
1865 107 : 1_000, // 1ms
1866 107 : 2_000,
1867 107 : 4_000,
1868 107 : 6_000,
1869 107 : 8_000,
1870 107 : 10_000, // 10ms
1871 107 : 20_000,
1872 107 : 40_000,
1873 107 : 60_000,
1874 107 : 80_000,
1875 107 : 100_000,
1876 107 : 200_000,
1877 107 : 400_000,
1878 107 : 600_000,
1879 107 : 800_000,
1880 107 : 1_000_000, // 1s
1881 107 : 2_000_000,
1882 107 : 4_000_000,
1883 107 : 6_000_000,
1884 107 : 8_000_000,
1885 107 : 10_000_000, // 10s
1886 107 : 20_000_000,
1887 107 : 50_000_000,
1888 107 : 100_000_000,
1889 107 : 200_000_000,
1890 107 : 1_000_000_000, // 1000s
1891 107 : ]
1892 107 : .into_iter()
1893 107 : .map(Duration::from_micros)
1894 4387 : .map(|d| d.as_secs_f64())
1895 107 : .collect()
1896 107 : });
1897 :
1898 107 : static SMGR_QUERY_TIME_GLOBAL: Lazy<HistogramVec> = Lazy::new(|| {
1899 107 : register_histogram_vec!(
1900 107 : "pageserver_smgr_query_seconds_global",
1901 107 : "Like pageserver_smgr_query_seconds, but aggregated to instance level.",
1902 107 : &["smgr_query_type"],
1903 107 : SMGR_QUERY_TIME_GLOBAL_BUCKETS.clone(),
1904 107 : )
1905 107 : .expect("failed to define a metric")
1906 107 : });
1907 :
1908 107 : static PAGE_SERVICE_BATCH_SIZE_BUCKETS_GLOBAL: Lazy<Vec<f64>> = Lazy::new(|| {
1909 107 : (1..=u32::try_from(Timeline::MAX_GET_VECTORED_KEYS).unwrap())
1910 3424 : .map(|v| v.into())
1911 107 : .collect()
1912 107 : });
1913 :
1914 107 : static PAGE_SERVICE_BATCH_SIZE_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
1915 107 : register_histogram!(
1916 107 : "pageserver_page_service_batch_size_global",
1917 107 : "Batch size of pageserver page service requests",
1918 107 : PAGE_SERVICE_BATCH_SIZE_BUCKETS_GLOBAL.clone(),
1919 107 : )
1920 107 : .expect("failed to define a metric")
1921 107 : });
1922 :
1923 107 : static PAGE_SERVICE_BATCH_SIZE_BUCKETS_PER_TIMELINE: Lazy<Vec<f64>> = Lazy::new(|| {
1924 107 : let mut buckets = Vec::new();
1925 749 : for i in 0.. {
1926 749 : let bucket = 1 << i;
1927 749 : if bucket > u32::try_from(Timeline::MAX_GET_VECTORED_KEYS).unwrap() {
1928 107 : break;
1929 642 : }
1930 642 : buckets.push(bucket.into());
1931 : }
1932 107 : buckets
1933 107 : });
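     :
     : // Assuming Timeline::MAX_GET_VECTORED_KEYS is 32 (consistent with the global
     : // buckets above), the loop yields [1.0, 2.0, 4.0, 8.0, 16.0, 32.0] and breaks at 64.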
1934 :
1935 107 : static PAGE_SERVICE_BATCH_SIZE_PER_TENANT_TIMELINE: Lazy<HistogramVec> = Lazy::new(|| {
1936 107 : register_histogram_vec!(
1937 107 : "pageserver_page_service_batch_size",
1938 107 : "Batch size of pageserver page service requests",
1939 107 : &["tenant_id", "shard_id", "timeline_id"],
1940 107 : PAGE_SERVICE_BATCH_SIZE_BUCKETS_PER_TIMELINE.clone()
1941 107 : )
1942 107 : .expect("failed to define a metric")
1943 107 : });
1944 :
1945 107 : static PAGE_SERVICE_BATCH_BREAK_REASON_GLOBAL: Lazy<IntCounterVec> = Lazy::new(|| {
1946 107 : register_int_counter_vec!(
1947 107 : // it's a counter, but the name is prepared for extension into a histogram of queue depth
1948 107 : "pageserver_page_service_batch_break_reason_global",
1949 107 : "Reason for breaking batches of get page requests",
1950 107 : &["reason"],
1951 107 : )
1952 107 : .expect("failed to define a metric")
1953 107 : });
1954 :
1955 : struct GetPageBatchBreakReasonTimelineMetrics {
1956 : map: EnumMap<GetPageBatchBreakReason, IntCounter>,
1957 : }
1958 :
1959 : impl GetPageBatchBreakReasonTimelineMetrics {
1960 233 : fn new(tenant_id: &str, shard_slug: &str, timeline_id: &str) -> Self {
1961 233 : GetPageBatchBreakReasonTimelineMetrics {
1962 1631 : map: EnumMap::from_array(std::array::from_fn(|reason_idx| {
1963 1631 : let reason = GetPageBatchBreakReason::from_usize(reason_idx);
1964 1631 : PAGE_SERVICE_BATCH_BREAK_REASON_PER_TENANT_TIMELINE.with_label_values(&[
1965 1631 : tenant_id,
1966 1631 : shard_slug,
1967 1631 : timeline_id,
1968 1631 : reason.into(),
1969 1631 : ])
1970 1631 : })),
1971 233 : }
1972 233 : }
1973 :
1974 0 : fn inc(&self, reason: GetPageBatchBreakReason) {
1975 0 : self.map[reason].inc()
1976 0 : }
1977 : }
1978 :
1979 107 : static PAGE_SERVICE_BATCH_BREAK_REASON_PER_TENANT_TIMELINE: Lazy<IntCounterVec> = Lazy::new(|| {
1980 107 : register_int_counter_vec!(
1981 107 : "pageserver_page_service_batch_break_reason",
1982 107 : "Reason for breaking batches of get page requests",
1983 107 : &["tenant_id", "shard_id", "timeline_id", "reason"],
1984 107 : )
1985 107 : .expect("failed to define a metric")
1986 107 : });
1987 :
1988 0 : pub(crate) static PAGE_SERVICE_CONFIG_MAX_BATCH_SIZE: Lazy<IntGaugeVec> = Lazy::new(|| {
1989 0 : register_int_gauge_vec!(
1990 0 : "pageserver_page_service_config_max_batch_size",
1991 0 : "Configured maximum batch size for the server-side batching functionality of page_service. \
1992 0 : Labels expose more of the configuration parameters.",
1993 0 : &["mode", "execution", "batching"]
1994 0 : )
1995 0 : .expect("failed to define a metric")
1996 0 : });
1997 :
1998 0 : fn set_page_service_config_max_batch_size(conf: &PageServicePipeliningConfig) {
1999 0 : PAGE_SERVICE_CONFIG_MAX_BATCH_SIZE.reset();
2000 0 : let (label_values, value) = match conf {
2001 0 : PageServicePipeliningConfig::Serial => (["serial", "-", "-"], 1),
2002 : PageServicePipeliningConfig::Pipelined(PageServicePipeliningConfigPipelined {
2003 0 : max_batch_size,
2004 0 : execution,
2005 0 : batching,
2006 0 : }) => {
2007 0 : let mode = "pipelined";
2008 0 : let execution = match execution {
2009 : PageServiceProtocolPipelinedExecutionStrategy::ConcurrentFutures => {
2010 0 : "concurrent-futures"
2011 : }
2012 0 : PageServiceProtocolPipelinedExecutionStrategy::Tasks => "tasks",
2013 : };
2014 0 : let batching = match batching {
2015 0 : PageServiceProtocolPipelinedBatchingStrategy::UniformLsn => "uniform-lsn",
2016 0 : PageServiceProtocolPipelinedBatchingStrategy::ScatteredLsn => "scattered-lsn",
2017 : };
2018 :
2019 0 : ([mode, execution, batching], max_batch_size.get())
2020 : }
2021 : };
2022 0 : PAGE_SERVICE_CONFIG_MAX_BATCH_SIZE
2023 0 : .with_label_values(&label_values)
2024 0 : .set(value.try_into().unwrap());
2025 0 : }
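     :
     : // For example (illustrative exposition only), a pipelined configuration with a
     : // maximum batch size of 32 would be exported as:
     : //
     : //   pageserver_page_service_config_max_batch_size{mode="pipelined",execution="concurrent-futures",batching="uniform-lsn"} 32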
2026 :
2027 107 : static PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS: Lazy<IntCounterVec> = Lazy::new(|| {
2028 107 : register_int_counter_vec!(
2029 107 : "pageserver_page_service_pagestream_flush_in_progress_micros",
2030 107 : "Counter that sums up the microseconds that a pagestream response was being flushed into the TCP connection. \
2031 107 : If the flush is particularly slow, this counter will be updated periodically to make slow flushes \
2032 107 : easily discoverable in monitoring. \
2033 107 : Hence, this is NOT a completion latency histogram.",
2034 107 : &["tenant_id", "shard_id", "timeline_id"],
2035 107 : )
2036 107 : .expect("failed to define a metric")
2037 107 : });
2038 :
2039 107 : static PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS_GLOBAL: Lazy<IntCounter> = Lazy::new(|| {
2040 107 : register_int_counter!(
2041 107 : "pageserver_page_service_pagestream_flush_in_progress_micros_global",
2042 107 : "Like pageserver_page_service_pagestream_flush_in_progress_seconds, but instance-wide.",
2043 107 : )
2044 107 : .expect("failed to define a metric")
2045 107 : });
2046 :
2047 107 : static PAGE_SERVICE_SMGR_BATCH_WAIT_TIME: Lazy<HistogramVec> = Lazy::new(|| {
2048 107 : register_histogram_vec!(
2049 107 : "pageserver_page_service_pagestream_batch_wait_time_seconds",
2050 107 : "Time a request spent waiting in its batch until the batch moved to throttle&execution.",
2051 107 : &["tenant_id", "shard_id", "timeline_id"],
2052 107 : SMGR_QUERY_TIME_PER_TENANT_TIMELINE_BUCKETS.into(),
2053 107 : )
2054 107 : .expect("failed to define a metric")
2055 107 : });
2056 :
2057 107 : static PAGE_SERVICE_SMGR_BATCH_WAIT_TIME_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
2058 107 : register_histogram!(
2059 107 : "pageserver_page_service_pagestream_batch_wait_time_seconds_global",
2060 107 : "Like pageserver_page_service_pagestream_batch_wait_time_seconds, but aggregated to instance level.",
2061 107 : SMGR_QUERY_TIME_GLOBAL_BUCKETS.to_vec(),
2062 107 : )
2063 107 : .expect("failed to define a metric")
2064 107 : });
2065 :
2066 : impl SmgrQueryTimePerTimeline {
2067 233 : pub(crate) fn new(
2068 233 : tenant_shard_id: &TenantShardId,
2069 233 : timeline_id: &TimelineId,
2070 233 : pagestream_throttle_metrics: Arc<tenant_throttling::Pagestream>,
2071 233 : ) -> Self {
2072 233 : let tenant_id = tenant_shard_id.tenant_id.to_string();
2073 233 : let shard_slug = format!("{}", tenant_shard_id.shard_slug());
2074 233 : let timeline_id = timeline_id.to_string();
2075 1398 : let global_started = std::array::from_fn(|i| {
2076 1398 : let op = SmgrQueryType::from_repr(i).unwrap();
2077 1398 : SMGR_QUERY_STARTED_GLOBAL
2078 1398 : .get_metric_with_label_values(&[op.into()])
2079 1398 : .unwrap()
2080 1398 : });
2081 1398 : let global_latency = std::array::from_fn(|i| {
2082 1398 : let op = SmgrQueryType::from_repr(i).unwrap();
2083 1398 : SMGR_QUERY_TIME_GLOBAL
2084 1398 : .get_metric_with_label_values(&[op.into()])
2085 1398 : .unwrap()
2086 1398 : });
2087 233 :
2088 233 : let per_timeline_getpage_started = SMGR_QUERY_STARTED_PER_TENANT_TIMELINE
2089 233 : .get_metric_with_label_values(&[
2090 233 : SmgrQueryType::GetPageAtLsn.into(),
2091 233 : &tenant_id,
2092 233 : &shard_slug,
2093 233 : &timeline_id,
2094 233 : ])
2095 233 : .unwrap();
2096 233 : let per_timeline_getpage_latency = SMGR_QUERY_TIME_PER_TENANT_TIMELINE
2097 233 : .get_metric_with_label_values(&[
2098 233 : SmgrQueryType::GetPageAtLsn.into(),
2099 233 : &tenant_id,
2100 233 : &shard_slug,
2101 233 : &timeline_id,
2102 233 : ])
2103 233 : .unwrap();
2104 233 :
2105 233 : let global_batch_size = PAGE_SERVICE_BATCH_SIZE_GLOBAL.clone();
2106 233 : let per_timeline_batch_size = PAGE_SERVICE_BATCH_SIZE_PER_TENANT_TIMELINE
2107 233 : .get_metric_with_label_values(&[&tenant_id, &shard_slug, &timeline_id])
2108 233 : .unwrap();
2109 233 :
2110 233 : let global_batch_wait_time = PAGE_SERVICE_SMGR_BATCH_WAIT_TIME_GLOBAL.clone();
2111 233 : let per_timeline_batch_wait_time = PAGE_SERVICE_SMGR_BATCH_WAIT_TIME
2112 233 : .get_metric_with_label_values(&[&tenant_id, &shard_slug, &timeline_id])
2113 233 : .unwrap();
2114 233 :
2115 1631 : let global_batch_break_reason = std::array::from_fn(|i| {
2116 1631 : let reason = GetPageBatchBreakReason::from_usize(i);
2117 1631 : PAGE_SERVICE_BATCH_BREAK_REASON_GLOBAL
2118 1631 : .get_metric_with_label_values(&[reason.into()])
2119 1631 : .unwrap()
2120 1631 : });
2121 233 : let per_timeline_batch_break_reason =
2122 233 : GetPageBatchBreakReasonTimelineMetrics::new(&tenant_id, &shard_slug, &timeline_id);
2123 233 :
2124 233 : let global_flush_in_progress_micros =
2125 233 : PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS_GLOBAL.clone();
2126 233 : let per_timeline_flush_in_progress_micros = PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS
2127 233 : .get_metric_with_label_values(&[&tenant_id, &shard_slug, &timeline_id])
2128 233 : .unwrap();
2129 233 :
2130 233 : Self {
2131 233 : global_started,
2132 233 : global_latency,
2133 233 : per_timeline_getpage_latency,
2134 233 : per_timeline_getpage_started,
2135 233 : global_batch_size,
2136 233 : per_timeline_batch_size,
2137 233 : global_flush_in_progress_micros,
2138 233 : per_timeline_flush_in_progress_micros,
2139 233 : global_batch_wait_time,
2140 233 : per_timeline_batch_wait_time,
2141 233 : global_batch_break_reason,
2142 233 : per_timeline_batch_break_reason,
2143 233 : throttling: pagestream_throttle_metrics,
2144 233 : }
2145 233 : }
2146 0 : pub(crate) fn start_smgr_op(&self, op: SmgrQueryType, received_at: Instant) -> SmgrOpTimer {
2147 0 : self.global_started[op as usize].inc();
2148 :
2149 0 : let per_timeline_latency_histo = if matches!(op, SmgrQueryType::GetPageAtLsn) {
2150 0 : self.per_timeline_getpage_started.inc();
2151 0 : Some(self.per_timeline_getpage_latency.clone())
2152 : } else {
2153 0 : None
2154 : };
2155 :
2156 0 : SmgrOpTimer(Some(SmgrOpTimerInner {
2157 0 : global_execution_latency_histo: self.global_latency[op as usize].clone(),
2158 0 : per_timeline_execution_latency_histo: per_timeline_latency_histo,
2159 0 : global_flush_in_progress_micros: self.global_flush_in_progress_micros.clone(),
2160 0 : per_timeline_flush_in_progress_micros: self
2161 0 : .per_timeline_flush_in_progress_micros
2162 0 : .clone(),
2163 0 : global_batch_wait_time: self.global_batch_wait_time.clone(),
2164 0 : per_timeline_batch_wait_time: self.per_timeline_batch_wait_time.clone(),
2165 0 : throttling: self.throttling.clone(),
2166 0 : timings: SmgrOpTimerState::Received { received_at },
2167 0 : }))
2168 0 : }
2169 :
2170 : /// TODO: do something about this? seems odd, we have a similar call on SmgrOpTimer
2171 0 : pub(crate) fn observe_getpage_batch_start(
2172 0 : &self,
2173 0 : batch_size: usize,
2174 0 : break_reason: GetPageBatchBreakReason,
2175 0 : ) {
2176 0 : self.global_batch_size.observe(batch_size as f64);
2177 0 : self.per_timeline_batch_size.observe(batch_size as f64);
2178 0 :
2179 0 : self.global_batch_break_reason[break_reason.into_usize()].inc();
2180 0 : self.per_timeline_batch_break_reason.inc(break_reason);
2181 0 : }
2182 : }
2183 :
2184 : // Keep in sync with the control plane Go code so that we can validate
2185 : // compute's basebackup_ms metric against our perspective in the context of SLI/SLO.
2186 0 : static COMPUTE_STARTUP_BUCKETS: Lazy<[f64; 28]> = Lazy::new(|| {
2187 0 : // Go code uses milliseconds. Variable is called `computeStartupBuckets`
2188 0 : [
2189 0 : 5, 10, 20, 30, 50, 70, 100, 120, 150, 200, 250, 300, 350, 400, 450, 500, 600, 800, 1000,
2190 0 : 1500, 2000, 2500, 3000, 5000, 10000, 20000, 40000, 60000,
2191 0 : ]
2192 0 : .map(|ms| (ms as f64) / 1000.0)
2193 0 : });
2194 :
2195 : pub(crate) struct BasebackupQueryTime {
2196 : ok: Histogram,
2197 : error: Histogram,
2198 : client_error: Histogram,
2199 : }
2200 :
2201 0 : pub(crate) static BASEBACKUP_QUERY_TIME: Lazy<BasebackupQueryTime> = Lazy::new(|| {
2202 0 : let vec = register_histogram_vec!(
2203 0 : "pageserver_basebackup_query_seconds",
2204 0 : "Histogram of basebackup queries durations, by result type",
2205 0 : &["result"],
2206 0 : COMPUTE_STARTUP_BUCKETS.to_vec(),
2207 0 : )
2208 0 : .expect("failed to define a metric");
2209 0 : BasebackupQueryTime {
2210 0 : ok: vec.get_metric_with_label_values(&["ok"]).unwrap(),
2211 0 : error: vec.get_metric_with_label_values(&["error"]).unwrap(),
2212 0 : client_error: vec.get_metric_with_label_values(&["client_error"]).unwrap(),
2213 0 : }
2214 0 : });
2215 :
2216 : pub(crate) struct BasebackupQueryTimeOngoingRecording<'a> {
2217 : parent: &'a BasebackupQueryTime,
2218 : start: std::time::Instant,
2219 : }
2220 :
2221 : impl BasebackupQueryTime {
2222 0 : pub(crate) fn start_recording(&self) -> BasebackupQueryTimeOngoingRecording<'_> {
2223 0 : let start = Instant::now();
2224 0 : BasebackupQueryTimeOngoingRecording {
2225 0 : parent: self,
2226 0 : start,
2227 0 : }
2228 0 : }
2229 : }
2230 :
2231 : impl BasebackupQueryTimeOngoingRecording<'_> {
2232 0 : pub(crate) fn observe<T>(self, res: &Result<T, QueryError>) {
2233 0 : let elapsed = self.start.elapsed().as_secs_f64();
2234 : // If you want to change categorize of a specific error, also change it in `log_query_error`.
2235 0 : let metric = match res {
2236 0 : Ok(_) => &self.parent.ok,
2237 : Err(QueryError::Shutdown) | Err(QueryError::Reconnect) => {
2238 : // Do not observe ok/err for shutdown/reconnect.
2239 : // Reconnect error might be raised when the operation is waiting for LSN and the tenant shutdown interrupts
2240 : // the operation. A reconnect error will be issued and the client will retry.
2241 0 : return;
2242 : }
2243 0 : Err(QueryError::Disconnected(ConnectionError::Io(io_error)))
2244 0 : if is_expected_io_error(io_error) =>
2245 0 : {
2246 0 : &self.parent.client_error
2247 : }
2248 0 : Err(_) => &self.parent.error,
2249 : };
2250 0 : metric.observe(elapsed);
2251 0 : }
2252 : }
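     :
     : // Minimal usage sketch (hypothetical handler; `run_basebackup` is a stand-in):
     : //
     : //   let recording = BASEBACKUP_QUERY_TIME.start_recording();
     : //   let res: Result<(), QueryError> = run_basebackup().await;
     : //   recording.observe(&res); // picks ok/error/client_error, skips shutdown/reconnect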
2253 :
2254 0 : pub(crate) static LIVE_CONNECTIONS: Lazy<IntCounterPairVec> = Lazy::new(|| {
2255 0 : register_int_counter_pair_vec!(
2256 0 : "pageserver_live_connections_started",
2257 0 : "Number of network connections that we started handling",
2258 0 : "pageserver_live_connections_finished",
2259 0 : "Number of network connections that we finished handling",
2260 0 : &["pageserver_connection_kind"]
2261 0 : )
2262 0 : .expect("failed to define a metric")
2263 0 : });
2264 :
2265 : #[derive(Clone, Copy, enum_map::Enum, IntoStaticStr)]
2266 : pub(crate) enum ComputeCommandKind {
2267 : PageStreamV3,
2268 : PageStreamV2,
2269 : Basebackup,
2270 : Fullbackup,
2271 : LeaseLsn,
2272 : }
2273 :
2274 : pub(crate) struct ComputeCommandCounters {
2275 : map: EnumMap<ComputeCommandKind, IntCounter>,
2276 : }
2277 :
2278 0 : pub(crate) static COMPUTE_COMMANDS_COUNTERS: Lazy<ComputeCommandCounters> = Lazy::new(|| {
2279 0 : let inner = register_int_counter_vec!(
2280 0 : "pageserver_compute_commands",
2281 0 : "Number of compute -> pageserver commands processed",
2282 0 : &["command"]
2283 0 : )
2284 0 : .expect("failed to define a metric");
2285 0 :
2286 0 : ComputeCommandCounters {
2287 0 : map: EnumMap::from_array(std::array::from_fn(|i| {
2288 0 : let command = ComputeCommandKind::from_usize(i);
2289 0 : let command_str: &'static str = command.into();
2290 0 : inner.with_label_values(&[command_str])
2291 0 : })),
2292 0 : }
2293 0 : });
2294 :
2295 : impl ComputeCommandCounters {
2296 0 : pub(crate) fn for_command(&self, command: ComputeCommandKind) -> &IntCounter {
2297 0 : &self.map[command]
2298 0 : }
2299 : }
2300 :
2301 : // remote storage metrics
2302 :
2303 105 : static REMOTE_TIMELINE_CLIENT_CALLS: Lazy<IntCounterPairVec> = Lazy::new(|| {
2304 105 : register_int_counter_pair_vec!(
2305 105 : "pageserver_remote_timeline_client_calls_started",
2306 105 : "Number of started calls to remote timeline client.",
2307 105 : "pageserver_remote_timeline_client_calls_finished",
2308 105 : "Number of finshed calls to remote timeline client.",
2309 105 : &[
2310 105 : "tenant_id",
2311 105 : "shard_id",
2312 105 : "timeline_id",
2313 105 : "file_kind",
2314 105 : "op_kind"
2315 105 : ],
2316 105 : )
2317 105 : .unwrap()
2318 105 : });
2319 :
2320 : static REMOTE_TIMELINE_CLIENT_BYTES_STARTED_COUNTER: Lazy<IntCounterVec> =
2321 104 : Lazy::new(|| {
2322 104 : register_int_counter_vec!(
2323 104 : "pageserver_remote_timeline_client_bytes_started",
2324 104 : "Incremented by the number of bytes associated with a remote timeline client operation. \
2325 104 : The increment happens when the operation is scheduled.",
2326 104 : &["tenant_id", "shard_id", "timeline_id", "file_kind", "op_kind"],
2327 104 : )
2328 104 : .expect("failed to define a metric")
2329 104 : });
2330 :
2331 104 : static REMOTE_TIMELINE_CLIENT_BYTES_FINISHED_COUNTER: Lazy<IntCounterVec> = Lazy::new(|| {
2332 104 : register_int_counter_vec!(
2333 104 : "pageserver_remote_timeline_client_bytes_finished",
2334 104 : "Incremented by the number of bytes associated with a remote timeline client operation. \
2335 104 : The increment happens when the operation finishes (regardless of success/failure/shutdown).",
2336 104 : &["tenant_id", "shard_id", "timeline_id", "file_kind", "op_kind"],
2337 104 : )
2338 104 : .expect("failed to define a metric")
2339 104 : });
2340 :
2341 : pub(crate) struct TenantManagerMetrics {
2342 : tenant_slots_attached: UIntGauge,
2343 : tenant_slots_secondary: UIntGauge,
2344 : tenant_slots_inprogress: UIntGauge,
2345 : pub(crate) tenant_slot_writes: IntCounter,
2346 : pub(crate) unexpected_errors: IntCounter,
2347 : }
2348 :
2349 : impl TenantManagerMetrics {
2350 : /// Helpers for tracking slots. Note that these do not track the lifetime of TenantSlot objects
2351 : /// exactly: they track the lifetime of the slots _in the tenant map_.
2352 1 : pub(crate) fn slot_inserted(&self, slot: &TenantSlot) {
2353 1 : match slot {
2354 0 : TenantSlot::Attached(_) => {
2355 0 : self.tenant_slots_attached.inc();
2356 0 : }
2357 0 : TenantSlot::Secondary(_) => {
2358 0 : self.tenant_slots_secondary.inc();
2359 0 : }
2360 1 : TenantSlot::InProgress(_) => {
2361 1 : self.tenant_slots_inprogress.inc();
2362 1 : }
2363 : }
2364 1 : }
2365 :
2366 1 : pub(crate) fn slot_removed(&self, slot: &TenantSlot) {
2367 1 : match slot {
2368 1 : TenantSlot::Attached(_) => {
2369 1 : self.tenant_slots_attached.dec();
2370 1 : }
2371 0 : TenantSlot::Secondary(_) => {
2372 0 : self.tenant_slots_secondary.dec();
2373 0 : }
2374 0 : TenantSlot::InProgress(_) => {
2375 0 : self.tenant_slots_inprogress.dec();
2376 0 : }
2377 : }
2378 1 : }
2379 :
2380 : #[cfg(all(debug_assertions, not(test)))]
2381 0 : pub(crate) fn slots_total(&self) -> u64 {
2382 0 : self.tenant_slots_attached.get()
2383 0 : + self.tenant_slots_secondary.get()
2384 0 : + self.tenant_slots_inprogress.get()
2385 0 : }
2386 : }
2387 :
2388 1 : pub(crate) static TENANT_MANAGER: Lazy<TenantManagerMetrics> = Lazy::new(|| {
2389 1 : let tenant_slots = register_uint_gauge_vec!(
2390 1 : "pageserver_tenant_manager_slots",
2391 1 : "How many slots currently exist, including all attached, secondary and in-progress operations",
2392 1 : &["mode"]
2393 1 : )
2394 1 : .expect("failed to define a metric");
2395 1 : TenantManagerMetrics {
2396 1 : tenant_slots_attached: tenant_slots
2397 1 : .get_metric_with_label_values(&["attached"])
2398 1 : .unwrap(),
2399 1 : tenant_slots_secondary: tenant_slots
2400 1 : .get_metric_with_label_values(&["secondary"])
2401 1 : .unwrap(),
2402 1 : tenant_slots_inprogress: tenant_slots
2403 1 : .get_metric_with_label_values(&["inprogress"])
2404 1 : .unwrap(),
2405 1 : tenant_slot_writes: register_int_counter!(
2406 1 : "pageserver_tenant_manager_slot_writes",
2407 1 : "Writes to a tenant slot, including all of create/attach/detach/delete"
2408 1 : )
2409 1 : .expect("failed to define a metric"),
2410 1 : unexpected_errors: register_int_counter!(
2411 1 : "pageserver_tenant_manager_unexpected_errors_total",
2412 1 : "Number of unexpected conditions encountered: nonzero value indicates a non-fatal bug."
2413 1 : )
2414 1 : .expect("failed to define a metric"),
2415 1 : }
2416 1 : });
2417 :
2418 : pub(crate) struct DeletionQueueMetrics {
2419 : pub(crate) keys_submitted: IntCounter,
2420 : pub(crate) keys_dropped: IntCounter,
2421 : pub(crate) keys_executed: IntCounter,
2422 : pub(crate) keys_validated: IntCounter,
2423 : pub(crate) dropped_lsn_updates: IntCounter,
2424 : pub(crate) unexpected_errors: IntCounter,
2425 : pub(crate) remote_errors: IntCounterVec,
2426 : }
2427 18 : pub(crate) static DELETION_QUEUE: Lazy<DeletionQueueMetrics> = Lazy::new(|| {
2428 18 : DeletionQueueMetrics{
2429 18 :
2430 18 : keys_submitted: register_int_counter!(
2431 18 : "pageserver_deletion_queue_submitted_total",
2432 18 : "Number of objects submitted for deletion"
2433 18 : )
2434 18 : .expect("failed to define a metric"),
2435 18 :
2436 18 : keys_dropped: register_int_counter!(
2437 18 : "pageserver_deletion_queue_dropped_total",
2438 18 : "Number of object deletions dropped due to stale generation."
2439 18 : )
2440 18 : .expect("failed to define a metric"),
2441 18 :
2442 18 : keys_executed: register_int_counter!(
2443 18 : "pageserver_deletion_queue_executed_total",
2444 18 : "Number of objects deleted. Only includes objects that we actually deleted, sum with pageserver_deletion_queue_dropped_total for the total number of keys processed to completion"
2445 18 : )
2446 18 : .expect("failed to define a metric"),
2447 18 :
2448 18 : keys_validated: register_int_counter!(
2449 18 : "pageserver_deletion_queue_validated_total",
2450 18 : "Number of keys validated for deletion. Sum with pageserver_deletion_queue_dropped_total for the total number of keys that have passed through the validation stage."
2451 18 : )
2452 18 : .expect("failed to define a metric"),
2453 18 :
2454 18 : dropped_lsn_updates: register_int_counter!(
2455 18 : "pageserver_deletion_queue_dropped_lsn_updates_total",
2456 18 : "Updates to remote_consistent_lsn dropped due to stale generation number."
2457 18 : )
2458 18 : .expect("failed to define a metric"),
2459 18 : unexpected_errors: register_int_counter!(
2460 18 : "pageserver_deletion_queue_unexpected_errors_total",
2461 18 : "Number of unexpected condiions that may stall the queue: any value above zero is unexpected."
2462 18 : )
2463 18 : .expect("failed to define a metric"),
2464 18 : remote_errors: register_int_counter_vec!(
2465 18 : "pageserver_deletion_queue_remote_errors_total",
2466 18 : "Retryable remote I/O errors while executing deletions, for example 503 responses to DeleteObjects",
2467 18 : &["op_kind"],
2468 18 : )
2469 18 : .expect("failed to define a metric")
2470 18 : }
2471 18 : });
2472 :
2473 : pub(crate) struct SecondaryModeMetrics {
2474 : pub(crate) upload_heatmap: IntCounter,
2475 : pub(crate) upload_heatmap_errors: IntCounter,
2476 : pub(crate) upload_heatmap_duration: Histogram,
2477 : pub(crate) download_heatmap: IntCounter,
2478 : pub(crate) download_layer: IntCounter,
2479 : }
2480 0 : pub(crate) static SECONDARY_MODE: Lazy<SecondaryModeMetrics> = Lazy::new(|| {
2481 0 : SecondaryModeMetrics {
2482 0 : upload_heatmap: register_int_counter!(
2483 0 : "pageserver_secondary_upload_heatmap",
2484 0 : "Number of heatmaps written to remote storage by attached tenants"
2485 0 : )
2486 0 : .expect("failed to define a metric"),
2487 0 : upload_heatmap_errors: register_int_counter!(
2488 0 : "pageserver_secondary_upload_heatmap_errors",
2489 0 : "Failures writing heatmap to remote storage"
2490 0 : )
2491 0 : .expect("failed to define a metric"),
2492 0 : upload_heatmap_duration: register_histogram!(
2493 0 : "pageserver_secondary_upload_heatmap_duration",
2494 0 : "Time to build and upload a heatmap, including any waiting inside the remote storage client"
2495 0 : )
2496 0 : .expect("failed to define a metric"),
2497 0 : download_heatmap: register_int_counter!(
2498 0 : "pageserver_secondary_download_heatmap",
2499 0 : "Number of downloads of heatmaps by secondary mode locations, including when it hasn't changed"
2500 0 : )
2501 0 : .expect("failed to define a metric"),
2502 0 : download_layer: register_int_counter!(
2503 0 : "pageserver_secondary_download_layer",
2504 0 : "Number of downloads of layers by secondary mode locations"
2505 0 : )
2506 0 : .expect("failed to define a metric"),
2507 0 : }
2508 0 : });
2509 :
2510 0 : pub(crate) static SECONDARY_RESIDENT_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
2511 0 : register_uint_gauge_vec!(
2512 0 : "pageserver_secondary_resident_physical_size",
2513 0 : "The size of the layer files present in the pageserver's filesystem, for secondary locations.",
2514 0 : &["tenant_id", "shard_id"]
2515 0 : )
2516 0 : .expect("failed to define a metric")
2517 0 : });
2518 :
2519 0 : pub(crate) static NODE_UTILIZATION_SCORE: Lazy<UIntGauge> = Lazy::new(|| {
2520 0 : register_uint_gauge!(
2521 0 : "pageserver_utilization_score",
2522 0 : "The utilization score we report to the storage controller for scheduling, where 0 is empty, 1000000 is full, and anything above is considered overloaded",
2523 0 : )
2524 0 : .expect("failed to define a metric")
2525 0 : });
2526 :
2527 0 : pub(crate) static SECONDARY_HEATMAP_TOTAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
2528 0 : register_uint_gauge_vec!(
2529 0 : "pageserver_secondary_heatmap_total_size",
2530 0 : "The total size in bytes of all layers in the most recently downloaded heatmap.",
2531 0 : &["tenant_id", "shard_id"]
2532 0 : )
2533 0 : .expect("failed to define a metric")
2534 0 : });
2535 :
2536 : #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
2537 : pub enum RemoteOpKind {
2538 : Upload,
2539 : Download,
2540 : Delete,
2541 : }
2542 : impl RemoteOpKind {
2543 8034 : pub fn as_str(&self) -> &'static str {
2544 8034 : match self {
2545 7543 : Self::Upload => "upload",
2546 34 : Self::Download => "download",
2547 457 : Self::Delete => "delete",
2548 : }
2549 8034 : }
2550 : }
2551 :
2552 : #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
2553 : pub enum RemoteOpFileKind {
2554 : Layer,
2555 : Index,
2556 : }
2557 : impl RemoteOpFileKind {
2558 8034 : pub fn as_str(&self) -> &'static str {
2559 8034 : match self {
2560 5721 : Self::Layer => "layer",
2561 2313 : Self::Index => "index",
2562 : }
2563 8034 : }
2564 : }
2565 :
2566 103 : pub(crate) static REMOTE_TIMELINE_CLIENT_COMPLETION_LATENCY: Lazy<HistogramVec> = Lazy::new(|| {
2567 103 : register_histogram_vec!(
2568 103 : "pageserver_remote_timeline_client_seconds_global",
2569 103 : "Time spent on remote timeline client operations. \
2570 103 : Grouped by task_kind, file_kind, operation_kind and status. \
2571 103 : The task_kind is \
2572 103 : - for layer downloads, populated from RequestContext (primary objective of having the label) \
2573 103 : - for index downloads, set to 'unknown' \
2574 103 : - for any upload operation, set to 'RemoteUploadTask' \
2575 103 : This keeps dimensionality at bay. \
2576 103 : Does not account for time spent waiting in remote timeline client's queues.",
2577 103 : &["task_kind", "file_kind", "op_kind", "status"]
2578 103 : )
2579 103 : .expect("failed to define a metric")
2580 103 : });
2581 :
2582 0 : pub(crate) static TENANT_TASK_EVENTS: Lazy<IntCounterVec> = Lazy::new(|| {
2583 0 : register_int_counter_vec!(
2584 0 : "pageserver_tenant_task_events",
2585 0 : "Number of task start/stop/fail events.",
2586 0 : &["event"],
2587 0 : )
2588 0 : .expect("Failed to register tenant_task_events metric")
2589 0 : });
2590 :
2591 : pub struct BackgroundLoopSemaphoreMetrics {
2592 : counters: EnumMap<BackgroundLoopKind, IntCounterPair>,
2593 : durations: EnumMap<BackgroundLoopKind, Histogram>,
2594 : waiting_tasks: EnumMap<BackgroundLoopKind, IntGauge>,
2595 : running_tasks: EnumMap<BackgroundLoopKind, IntGauge>,
2596 : }
2597 :
2598 : pub(crate) static BACKGROUND_LOOP_SEMAPHORE: Lazy<BackgroundLoopSemaphoreMetrics> =
2599 10 : Lazy::new(|| {
2600 10 : let counters = register_int_counter_pair_vec!(
2601 10 : "pageserver_background_loop_semaphore_wait_start_count",
2602 10 : "Counter for background loop concurrency-limiting semaphore acquire calls started",
2603 10 : "pageserver_background_loop_semaphore_wait_finish_count",
2604 10 : "Counter for background loop concurrency-limiting semaphore acquire calls finished",
2605 10 : &["task"],
2606 10 : )
2607 10 : .unwrap();
2608 10 :
2609 10 : let durations = register_histogram_vec!(
2610 10 : "pageserver_background_loop_semaphore_wait_seconds",
2611 10 : "Seconds spent waiting on background loop semaphore acquisition",
2612 10 : &["task"],
2613 10 : vec![0.01, 1.0, 5.0, 10.0, 30.0, 60.0, 180.0, 300.0, 600.0],
2614 10 : )
2615 10 : .unwrap();
2616 10 :
2617 10 : let waiting_tasks = register_int_gauge_vec!(
2618 10 : "pageserver_background_loop_semaphore_waiting_tasks",
2619 10 : "Number of background loop tasks waiting for semaphore",
2620 10 : &["task"],
2621 10 : )
2622 10 : .unwrap();
2623 10 :
2624 10 : let running_tasks = register_int_gauge_vec!(
2625 10 : "pageserver_background_loop_semaphore_running_tasks",
2626 10 : "Number of background loop tasks running concurrently",
2627 10 : &["task"],
2628 10 : )
2629 10 : .unwrap();
2630 10 :
2631 10 : BackgroundLoopSemaphoreMetrics {
2632 100 : counters: EnumMap::from_array(std::array::from_fn(|i| {
2633 100 : let kind = BackgroundLoopKind::from_usize(i);
2634 100 : counters.with_label_values(&[kind.into()])
2635 100 : })),
2636 100 : durations: EnumMap::from_array(std::array::from_fn(|i| {
2637 100 : let kind = BackgroundLoopKind::from_usize(i);
2638 100 : durations.with_label_values(&[kind.into()])
2639 100 : })),
2640 100 : waiting_tasks: EnumMap::from_array(std::array::from_fn(|i| {
2641 100 : let kind = BackgroundLoopKind::from_usize(i);
2642 100 : waiting_tasks.with_label_values(&[kind.into()])
2643 100 : })),
2644 100 : running_tasks: EnumMap::from_array(std::array::from_fn(|i| {
2645 100 : let kind = BackgroundLoopKind::from_usize(i);
2646 100 : running_tasks.with_label_values(&[kind.into()])
2647 100 : })),
2648 10 : }
2649 10 : });
2650 :
2651 : impl BackgroundLoopSemaphoreMetrics {
2652 : /// Starts recording semaphore metrics. Call `acquired()` on the returned recorder when the
2653 : /// semaphore is acquired, and drop it when the task completes or is cancelled.
2654 182 : pub(crate) fn record(
2655 182 : &self,
2656 182 : task: BackgroundLoopKind,
2657 182 : ) -> BackgroundLoopSemaphoreMetricsRecorder {
2658 182 : BackgroundLoopSemaphoreMetricsRecorder::start(self, task)
2659 182 : }
2660 : }
2661 :
2662 : /// Records metrics for a background task.
2663 : pub struct BackgroundLoopSemaphoreMetricsRecorder<'a> {
2664 : metrics: &'a BackgroundLoopSemaphoreMetrics,
2665 : task: BackgroundLoopKind,
2666 : start: Instant,
2667 : wait_counter_guard: Option<metrics::IntCounterPairGuard>,
2668 : }
2669 :
2670 : impl<'a> BackgroundLoopSemaphoreMetricsRecorder<'a> {
2671 : /// Starts recording semaphore metrics, by recording wait time and incrementing
2672 : /// `wait_start_count` and `waiting_tasks`.
2673 182 : fn start(metrics: &'a BackgroundLoopSemaphoreMetrics, task: BackgroundLoopKind) -> Self {
2674 182 : metrics.waiting_tasks[task].inc();
2675 182 : Self {
2676 182 : metrics,
2677 182 : task,
2678 182 : start: Instant::now(),
2679 182 : wait_counter_guard: Some(metrics.counters[task].guard()),
2680 182 : }
2681 182 : }
2682 :
2683 : /// Signals that the semaphore has been acquired, and updates relevant metrics.
2684 182 : pub fn acquired(&mut self) -> Duration {
2685 182 : let waited = self.start.elapsed();
2686 182 : self.wait_counter_guard.take().expect("already acquired");
2687 182 : self.metrics.durations[self.task].observe(waited.as_secs_f64());
2688 182 : self.metrics.waiting_tasks[self.task].dec();
2689 182 : self.metrics.running_tasks[self.task].inc();
2690 182 : waited
2691 182 : }
2692 : }
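     :
     : // Usage sketch (hypothetical; the BackgroundLoopKind variant and the semaphore
     : // are examples):
     : //
     : //   let mut recorder = BACKGROUND_LOOP_SEMAPHORE.record(BackgroundLoopKind::Compaction);
     : //   let _permit = semaphore.acquire().await;  // concurrency-limiting semaphore
     : //   let waited = recorder.acquired();         // records wait time, marks task running
     : //   // ... do the work; dropping `recorder` decrements running_tasks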
2693 :
2694 : impl Drop for BackgroundLoopSemaphoreMetricsRecorder<'_> {
2695 : /// The task either completed or was cancelled.
2696 182 : fn drop(&mut self) {
2697 182 : if self.wait_counter_guard.take().is_some() {
2698 0 : // Waiting.
2699 0 : self.metrics.durations[self.task].observe(self.start.elapsed().as_secs_f64());
2700 0 : self.metrics.waiting_tasks[self.task].dec();
2701 182 : } else {
2702 182 : // Running.
2703 182 : self.metrics.running_tasks[self.task].dec();
2704 182 : }
2705 182 : }
2706 : }
2707 :
2708 0 : pub(crate) static BACKGROUND_LOOP_PERIOD_OVERRUN_COUNT: Lazy<IntCounterVec> = Lazy::new(|| {
2709 0 : register_int_counter_vec!(
2710 0 : "pageserver_background_loop_period_overrun_count",
2711 0 : "Incremented whenever warn_when_period_overrun() logs a warning.",
2712 0 : &["task", "period"],
2713 0 : )
2714 0 : .expect("failed to define a metric")
2715 0 : });
2716 :
2717 : // walreceiver metrics
2718 :
2719 0 : pub(crate) static WALRECEIVER_STARTED_CONNECTIONS: Lazy<IntCounter> = Lazy::new(|| {
2720 0 : register_int_counter!(
2721 0 : "pageserver_walreceiver_started_connections_total",
2722 0 : "Number of started walreceiver connections"
2723 0 : )
2724 0 : .expect("failed to define a metric")
2725 0 : });
2726 :
2727 0 : pub(crate) static WALRECEIVER_ACTIVE_MANAGERS: Lazy<IntGauge> = Lazy::new(|| {
2728 0 : register_int_gauge!(
2729 0 : "pageserver_walreceiver_active_managers",
2730 0 : "Number of active walreceiver managers"
2731 0 : )
2732 0 : .expect("failed to define a metric")
2733 0 : });
2734 :
2735 0 : pub(crate) static WALRECEIVER_SWITCHES: Lazy<IntCounterVec> = Lazy::new(|| {
2736 0 : register_int_counter_vec!(
2737 0 : "pageserver_walreceiver_switches_total",
2738 0 : "Number of walreceiver manager change_connection calls",
2739 0 : &["reason"]
2740 0 : )
2741 0 : .expect("failed to define a metric")
2742 0 : });
2743 :
2744 0 : pub(crate) static WALRECEIVER_BROKER_UPDATES: Lazy<IntCounter> = Lazy::new(|| {
2745 0 : register_int_counter!(
2746 0 : "pageserver_walreceiver_broker_updates_total",
2747 0 : "Number of received broker updates in walreceiver"
2748 0 : )
2749 0 : .expect("failed to define a metric")
2750 0 : });
2751 :
2752 1 : pub(crate) static WALRECEIVER_CANDIDATES_EVENTS: Lazy<IntCounterVec> = Lazy::new(|| {
2753 1 : register_int_counter_vec!(
2754 1 : "pageserver_walreceiver_candidates_events_total",
2755 1 : "Number of walreceiver candidate events",
2756 1 : &["event"]
2757 1 : )
2758 1 : .expect("failed to define a metric")
2759 1 : });
2760 :
2761 : pub(crate) static WALRECEIVER_CANDIDATES_ADDED: Lazy<IntCounter> =
2762 0 : Lazy::new(|| WALRECEIVER_CANDIDATES_EVENTS.with_label_values(&["add"]));
2763 :
2764 : pub(crate) static WALRECEIVER_CANDIDATES_REMOVED: Lazy<IntCounter> =
2765 1 : Lazy::new(|| WALRECEIVER_CANDIDATES_EVENTS.with_label_values(&["remove"]));
2766 :
2767 : // Metrics collected on WAL redo operations
2768 : //
2769 : // We collect the time spent in actual WAL redo ('redo'), and time waiting
2770 : // for access to the postgres process ('wait') since there is only one for
2771 : // each tenant.
2772 :
2773 : /// Time buckets are small because we want to be able to measure the
2774 : /// smallest redo processing times. These buckets allow us to measure down
2775 : /// to 5us, which equates to 200'000 pages/sec, which equates to 1.6GB/sec.
2776 : /// This is much better than the previous 5ms aka 200 pages/sec aka 1.6MB/sec.
2777 : ///
2778 : /// Values up to 1s are recorded because metrics show that we have redo
2779 : /// durations and lock times larger than 0.250s.
2780 : macro_rules! redo_histogram_time_buckets {
2781 : () => {
2782 : vec![
2783 : 0.000_005, 0.000_010, 0.000_025, 0.000_050, 0.000_100, 0.000_250, 0.000_500, 0.001_000,
2784 : 0.002_500, 0.005_000, 0.010_000, 0.025_000, 0.050_000, 0.100_000, 0.250_000, 0.500_000,
2785 : 1.000_000,
2786 : ]
2787 : };
2788 : }
2789 :
2790 : /// While we're at it, also measure the number of records replayed in each
2791 : /// operation. We have a global 'total replayed' counter, but that's not
2792 : /// as useful as 'what is the skew for how many records we replay in one
2793 : /// operation'.
2794 : macro_rules! redo_histogram_count_buckets {
2795 : () => {
2796 : vec![0.0, 1.0, 2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0]
2797 : };
2798 : }
2799 :
2800 : macro_rules! redo_bytes_histogram_count_buckets {
2801 : () => {
2802 : // powers of (2^.5), from 2^4.5 to 2^15 (22 buckets)
2803 : // rounded up to the next multiple of 8 to capture any MAXALIGNed record of that size, too.
2804 : vec![
2805 : 24.0, 32.0, 48.0, 64.0, 96.0, 128.0, 184.0, 256.0, 368.0, 512.0, 728.0, 1024.0, 1456.0,
2806 : 2048.0, 2904.0, 4096.0, 5800.0, 8192.0, 11592.0, 16384.0, 23176.0, 32768.0,
2807 : ]
2808 : };
2809 : }
2810 :
2811 : pub(crate) struct WalIngestMetrics {
2812 : pub(crate) bytes_received: IntCounter,
2813 : pub(crate) records_received: IntCounter,
2814 : pub(crate) records_observed: IntCounter,
2815 : pub(crate) records_committed: IntCounter,
2816 : pub(crate) records_filtered: IntCounter,
2817 : pub(crate) values_committed_metadata_images: IntCounter,
2818 : pub(crate) values_committed_metadata_deltas: IntCounter,
2819 : pub(crate) values_committed_data_images: IntCounter,
2820 : pub(crate) values_committed_data_deltas: IntCounter,
2821 : pub(crate) gap_blocks_zeroed_on_rel_extend: IntCounter,
2822 : }
2823 :
2824 : impl WalIngestMetrics {
2825 0 : pub(crate) fn inc_values_committed(&self, stats: &DatadirModificationStats) {
2826 0 : if stats.metadata_images > 0 {
2827 0 : self.values_committed_metadata_images
2828 0 : .inc_by(stats.metadata_images);
2829 0 : }
2830 0 : if stats.metadata_deltas > 0 {
2831 0 : self.values_committed_metadata_deltas
2832 0 : .inc_by(stats.metadata_deltas);
2833 0 : }
2834 0 : if stats.data_images > 0 {
2835 0 : self.values_committed_data_images.inc_by(stats.data_images);
2836 0 : }
2837 0 : if stats.data_deltas > 0 {
2838 0 : self.values_committed_data_deltas.inc_by(stats.data_deltas);
2839 0 : }
2840 0 : }
2841 : }
2842 :
2843 5 : pub(crate) static WAL_INGEST: Lazy<WalIngestMetrics> = Lazy::new(|| {
2844 5 : let values_committed = register_int_counter_vec!(
2845 5 : "pageserver_wal_ingest_values_committed",
2846 5 : "Number of values committed to pageserver storage from WAL records",
2847 5 : &["class", "kind"],
2848 5 : )
2849 5 : .expect("failed to define a metric");
2850 5 :
2851 5 : WalIngestMetrics {
2852 5 : bytes_received: register_int_counter!(
2853 5 : "pageserver_wal_ingest_bytes_received",
2854 5 : "Bytes of WAL ingested from safekeepers",
2855 5 : )
2856 5 : .unwrap(),
2857 5 : records_received: register_int_counter!(
2858 5 : "pageserver_wal_ingest_records_received",
2859 5 : "Number of WAL records received from safekeepers"
2860 5 : )
2861 5 : .expect("failed to define a metric"),
2862 5 : records_observed: register_int_counter!(
2863 5 : "pageserver_wal_ingest_records_observed",
2864 5 : "Number of WAL records observed from safekeepers. These are metadata only records for shard 0."
2865 5 : )
2866 5 : .expect("failed to define a metric"),
2867 5 : records_committed: register_int_counter!(
2868 5 : "pageserver_wal_ingest_records_committed",
2869 5 : "Number of WAL records which resulted in writes to pageserver storage"
2870 5 : )
2871 5 : .expect("failed to define a metric"),
2872 5 : records_filtered: register_int_counter!(
2873 5 : "pageserver_wal_ingest_records_filtered",
2874 5 : "Number of WAL records filtered out due to sharding"
2875 5 : )
2876 5 : .expect("failed to define a metric"),
2877 5 : values_committed_metadata_images: values_committed.with_label_values(&["metadata", "image"]),
2878 5 : values_committed_metadata_deltas: values_committed.with_label_values(&["metadata", "delta"]),
2879 5 : values_committed_data_images: values_committed.with_label_values(&["data", "image"]),
2880 5 : values_committed_data_deltas: values_committed.with_label_values(&["data", "delta"]),
2881 5 : gap_blocks_zeroed_on_rel_extend: register_int_counter!(
2882 5 : "pageserver_gap_blocks_zeroed_on_rel_extend",
2883 5 : "Total number of zero gap blocks written on relation extends"
2884 5 : )
2885 5 : .expect("failed to define a metric"),
2886 5 : }
2887 5 : });
2888 :
2889 107 : pub(crate) static PAGESERVER_TIMELINE_WAL_RECORDS_RECEIVED: Lazy<IntCounterVec> = Lazy::new(|| {
2890 107 : register_int_counter_vec!(
2891 107 : "pageserver_timeline_wal_records_received",
2892 107 : "Number of WAL records received per shard",
2893 107 : &["tenant_id", "shard_id", "timeline_id"]
2894 107 : )
2895 107 : .expect("failed to define a metric")
2896 107 : });
2897 :
2898 3 : pub(crate) static WAL_REDO_TIME: Lazy<Histogram> = Lazy::new(|| {
2899 3 : register_histogram!(
2900 3 : "pageserver_wal_redo_seconds",
2901 3 : "Time spent on WAL redo",
2902 3 : redo_histogram_time_buckets!()
2903 3 : )
2904 3 : .expect("failed to define a metric")
2905 3 : });
2906 :
2907 3 : pub(crate) static WAL_REDO_RECORDS_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
2908 3 : register_histogram!(
2909 3 : "pageserver_wal_redo_records_histogram",
2910 3 : "Histogram of number of records replayed per redo in the Postgres WAL redo process",
2911 3 : redo_histogram_count_buckets!(),
2912 3 : )
2913 3 : .expect("failed to define a metric")
2914 3 : });
2915 :
2916 3 : pub(crate) static WAL_REDO_BYTES_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
2917 3 : register_histogram!(
2918 3 : "pageserver_wal_redo_bytes_histogram",
2919 3 : "Histogram of number of records replayed per redo sent to Postgres",
2920 3 : redo_bytes_histogram_count_buckets!(),
2921 3 : )
2922 3 : .expect("failed to define a metric")
2923 3 : });
2924 :
2925 : // FIXME: isn't this already included by WAL_REDO_RECORDS_HISTOGRAM which has _count?
2926 3 : pub(crate) static WAL_REDO_RECORD_COUNTER: Lazy<IntCounter> = Lazy::new(|| {
2927 3 : register_int_counter!(
2928 3 : "pageserver_replayed_wal_records_total",
2929 3 : "Number of WAL records replayed in WAL redo process"
2930 3 : )
2931 3 : .unwrap()
2932 3 : });
2933 :
2934 : #[rustfmt::skip]
2935 4 : pub(crate) static WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
2936 4 : register_histogram!(
2937 4 : "pageserver_wal_redo_process_launch_duration",
2938 4 : "Histogram of the duration of successful WalRedoProcess::launch calls",
2939 4 : vec![
2940 4 : 0.0002, 0.0004, 0.0006, 0.0008, 0.0010,
2941 4 : 0.0020, 0.0040, 0.0060, 0.0080, 0.0100,
2942 4 : 0.0200, 0.0400, 0.0600, 0.0800, 0.1000,
2943 4 : 0.2000, 0.4000, 0.6000, 0.8000, 1.0000,
2944 4 : 1.5000, 2.0000, 2.5000, 3.0000, 4.0000, 10.0000
2945 4 : ],
2946 4 : )
2947 4 : .expect("failed to define a metric")
2948 4 : });
2949 :
2950 : pub(crate) struct WalRedoProcessCounters {
2951 : pub(crate) started: IntCounter,
2952 : pub(crate) killed_by_cause: EnumMap<WalRedoKillCause, IntCounter>,
2953 : pub(crate) active_stderr_logger_tasks_started: IntCounter,
2954 : pub(crate) active_stderr_logger_tasks_finished: IntCounter,
2955 : }
2956 :
2957 : #[derive(Debug, enum_map::Enum, strum_macros::IntoStaticStr)]
2958 : pub(crate) enum WalRedoKillCause {
2959 : WalRedoProcessDrop,
2960 : NoLeakChildDrop,
2961 : Startup,
2962 : }
2963 :
2964 : impl Default for WalRedoProcessCounters {
2965 4 : fn default() -> Self {
2966 4 : let started = register_int_counter!(
2967 4 : "pageserver_wal_redo_process_started_total",
2968 4 : "Number of WAL redo processes started",
2969 4 : )
2970 4 : .unwrap();
2971 4 :
2972 4 : let killed = register_int_counter_vec!(
2973 4 : "pageserver_wal_redo_process_stopped_total",
2974 4 : "Number of WAL redo processes stopped",
2975 4 : &["cause"],
2976 4 : )
2977 4 : .unwrap();
2978 4 :
2979 4 : let active_stderr_logger_tasks_started = register_int_counter!(
2980 4 : "pageserver_walredo_stderr_logger_tasks_started_total",
2981 4 : "Number of active walredo stderr logger tasks that have started",
2982 4 : )
2983 4 : .unwrap();
2984 4 :
2985 4 : let active_stderr_logger_tasks_finished = register_int_counter!(
2986 4 : "pageserver_walredo_stderr_logger_tasks_finished_total",
2987 4 : "Number of active walredo stderr logger tasks that have finished",
2988 4 : )
2989 4 : .unwrap();
2990 4 :
2991 4 : Self {
2992 4 : started,
2993 12 : killed_by_cause: EnumMap::from_array(std::array::from_fn(|i| {
2994 12 : let cause = WalRedoKillCause::from_usize(i);
2995 12 : let cause_str: &'static str = cause.into();
2996 12 : killed.with_label_values(&[cause_str])
2997 12 : })),
2998 4 : active_stderr_logger_tasks_started,
2999 4 : active_stderr_logger_tasks_finished,
3000 4 : }
3001 4 : }
3002 : }
3003 :
3004 : pub(crate) static WAL_REDO_PROCESS_COUNTERS: Lazy<WalRedoProcessCounters> =
3005 : Lazy::new(WalRedoProcessCounters::default);
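// A minimal usage sketch (hypothetical call sites): the `EnumMap` built in
// `Default::default` lets callers bump the counter for a specific kill cause
// by plain indexing, without any label-string lookup.
#[cfg(test)]
#[test]
fn wal_redo_process_counters_sketch() {
    let counters = &*WAL_REDO_PROCESS_COUNTERS;
    counters.started.inc();
    counters.killed_by_cause[WalRedoKillCause::Startup].inc();
    assert!(counters.killed_by_cause[WalRedoKillCause::Startup].get() >= 1);
}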
3006 :
3007 : /// Similar to `prometheus::HistogramTimer` but does not record on drop.
3008 : pub(crate) struct StorageTimeMetricsTimer {
3009 : metrics: StorageTimeMetrics,
3010 : start: Instant,
3011 : }
3012 :
3013 : impl StorageTimeMetricsTimer {
3014 1088 : fn new(metrics: StorageTimeMetrics) -> Self {
3015 1088 : Self {
3016 1088 : metrics,
3017 1088 : start: Instant::now(),
3018 1088 : }
3019 1088 : }
3020 :
3021 : /// Returns the elapsed duration of the timer.
3022 1088 : pub fn elapsed(&self) -> Duration {
3023 1088 : self.start.elapsed()
3024 1088 : }
3025 :
3026 : /// Record the time from creation to now and return it.
3027 1088 : pub fn stop_and_record(self) -> Duration {
3028 1088 : let duration = self.elapsed();
3029 1088 : let seconds = duration.as_secs_f64();
3030 1088 : self.metrics.timeline_sum.inc_by(seconds);
3031 1088 : self.metrics.timeline_count.inc();
3032 1088 : self.metrics.global_histogram.observe(seconds);
3033 1088 : duration
3034 1088 : }
3035 :
3036 : /// Converts this timer into one that always records on drop -- typically so that the
3037 : /// time is recorded even when an early `?` return path is taken in a function.
3038 10 : pub(crate) fn record_on_drop(self) -> AlwaysRecordingStorageTimeMetricsTimer {
3039 10 : AlwaysRecordingStorageTimeMetricsTimer(Some(self))
3040 10 : }
3041 : }
3042 :
3043 : pub(crate) struct AlwaysRecordingStorageTimeMetricsTimer(Option<StorageTimeMetricsTimer>);
3044 :
3045 : impl Drop for AlwaysRecordingStorageTimeMetricsTimer {
3046 10 : fn drop(&mut self) {
3047 10 : if let Some(inner) = self.0.take() {
3048 10 : inner.stop_and_record();
3049 10 : }
3050 10 : }
3051 : }
3052 :
3053 : impl AlwaysRecordingStorageTimeMetricsTimer {
3054 : /// Returns the elapsed duration of the timer.
3055 0 : pub fn elapsed(&self) -> Duration {
3056 0 : self.0.as_ref().expect("not dropped yet").elapsed()
3057 0 : }
3058 : }
3059 :
3060 : /// Timing facilities for a globally histogrammed metric, supplemented by a per-tenant
3061 : /// and per-timeline total sum and count.
3062 : #[derive(Clone, Debug)]
3063 : pub(crate) struct StorageTimeMetrics {
3064 : /// Sum of f64 seconds, per operation, tenant_id and timeline_id
3065 : timeline_sum: Counter,
3066 : /// Number of operations, per operation, tenant_id and timeline_id
3067 : timeline_count: IntCounter,
3068 : /// Global histogram having only the "operation" label.
3069 : global_histogram: Histogram,
3070 : }
3071 :
3072 : impl StorageTimeMetrics {
3073 2097 : pub fn new(
3074 2097 : operation: StorageTimeOperation,
3075 2097 : tenant_id: &str,
3076 2097 : shard_id: &str,
3077 2097 : timeline_id: &str,
3078 2097 : ) -> Self {
3079 2097 : let operation: &'static str = operation.into();
3080 2097 :
3081 2097 : let timeline_sum = STORAGE_TIME_SUM_PER_TIMELINE
3082 2097 : .get_metric_with_label_values(&[operation, tenant_id, shard_id, timeline_id])
3083 2097 : .unwrap();
3084 2097 : let timeline_count = STORAGE_TIME_COUNT_PER_TIMELINE
3085 2097 : .get_metric_with_label_values(&[operation, tenant_id, shard_id, timeline_id])
3086 2097 : .unwrap();
3087 2097 : let global_histogram = STORAGE_TIME_GLOBAL
3088 2097 : .get_metric_with_label_values(&[operation])
3089 2097 : .unwrap();
3090 2097 :
3091 2097 : StorageTimeMetrics {
3092 2097 : timeline_sum,
3093 2097 : timeline_count,
3094 2097 : global_histogram,
3095 2097 : }
3096 2097 : }
3097 :
3098 : /// Starts timing a new operation.
3099 : ///
3100 : /// Note: unlike `prometheus::HistogramTimer` the returned timer does not record on drop.
3101 1088 : pub fn start_timer(&self) -> StorageTimeMetricsTimer {
3102 1088 : StorageTimeMetricsTimer::new(self.clone())
3103 1088 : }
3104 : }
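// A minimal sketch (label values are made up) contrasting the two recording
// modes: `stop_and_record` observes exactly once at the call site, while
// `record_on_drop` guarantees an observation even when an early `?` return
// skips the explicit call.
#[cfg(test)]
#[test]
fn storage_time_metrics_timer_sketch() {
    let metrics =
        StorageTimeMetrics::new(StorageTimeOperation::Gc, "tenant", "shard", "timeline");
    // Explicit recording: observed when the timer is stopped.
    let _elapsed = metrics.start_timer().stop_and_record();
    // Drop-based recording: observed when the guard goes out of scope.
    let _guard = metrics.start_timer().record_on_drop();
}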
3105 :
3106 : pub(crate) struct TimelineMetrics {
3107 : tenant_id: String,
3108 : shard_id: String,
3109 : timeline_id: String,
3110 : pub flush_time_histo: StorageTimeMetrics,
3111 : pub flush_delay_histo: StorageTimeMetrics,
3112 : pub compact_time_histo: StorageTimeMetrics,
3113 : pub create_images_time_histo: StorageTimeMetrics,
3114 : pub logical_size_histo: StorageTimeMetrics,
3115 : pub imitate_logical_size_histo: StorageTimeMetrics,
3116 : pub load_layer_map_histo: StorageTimeMetrics,
3117 : pub garbage_collect_histo: StorageTimeMetrics,
3118 : pub find_gc_cutoffs_histo: StorageTimeMetrics,
3119 : pub last_record_lsn_gauge: IntGauge,
3120 : pub disk_consistent_lsn_gauge: IntGauge,
3121 : pub pitr_history_size: UIntGauge,
3122 : pub archival_size: UIntGauge,
3123 : pub layers_per_read: Histogram,
3124 : pub standby_horizon_gauge: IntGauge,
3125 : pub resident_physical_size_gauge: UIntGauge,
3126 : pub visible_physical_size_gauge: UIntGauge,
3127 : /// copy of LayeredTimeline.current_logical_size
3128 : pub current_logical_size_gauge: UIntGauge,
3129 : pub aux_file_size_gauge: IntGauge,
3130 : pub directory_entries_count_gauge: Lazy<UIntGauge, Box<dyn Send + Fn() -> UIntGauge>>,
3131 : pub evictions: IntCounter,
3132 : pub evictions_with_low_residence_duration: std::sync::RwLock<EvictionsWithLowResidenceDuration>,
3133 : /// Number of valid LSN leases.
3134 : pub valid_lsn_lease_count_gauge: UIntGauge,
3135 : pub wal_records_received: IntCounter,
3136 : pub storage_io_size: StorageIoSizeMetrics,
3137 : pub wait_lsn_in_progress_micros: GlobalAndPerTenantIntCounter,
3138 : pub wait_lsn_start_finish_counterpair: IntCounterPair,
3139 : pub wait_ondemand_download_time: wait_ondemand_download_time::WaitOndemandDownloadTimeSum,
3140 : shutdown: std::sync::atomic::AtomicBool,
3141 : }
3142 :
3143 : impl TimelineMetrics {
3144 233 : pub fn new(
3145 233 : tenant_shard_id: &TenantShardId,
3146 233 : timeline_id_raw: &TimelineId,
3147 233 : evictions_with_low_residence_duration_builder: EvictionsWithLowResidenceDurationBuilder,
3148 233 : ) -> Self {
3149 233 : let tenant_id = tenant_shard_id.tenant_id.to_string();
3150 233 : let shard_id = format!("{}", tenant_shard_id.shard_slug());
3151 233 : let timeline_id = timeline_id_raw.to_string();
3152 233 : let flush_time_histo = StorageTimeMetrics::new(
3153 233 : StorageTimeOperation::LayerFlush,
3154 233 : &tenant_id,
3155 233 : &shard_id,
3156 233 : &timeline_id,
3157 233 : );
3158 233 : let flush_delay_histo = StorageTimeMetrics::new(
3159 233 : StorageTimeOperation::LayerFlushDelay,
3160 233 : &tenant_id,
3161 233 : &shard_id,
3162 233 : &timeline_id,
3163 233 : );
3164 233 : let compact_time_histo = StorageTimeMetrics::new(
3165 233 : StorageTimeOperation::Compact,
3166 233 : &tenant_id,
3167 233 : &shard_id,
3168 233 : &timeline_id,
3169 233 : );
3170 233 : let create_images_time_histo = StorageTimeMetrics::new(
3171 233 : StorageTimeOperation::CreateImages,
3172 233 : &tenant_id,
3173 233 : &shard_id,
3174 233 : &timeline_id,
3175 233 : );
3176 233 : let logical_size_histo = StorageTimeMetrics::new(
3177 233 : StorageTimeOperation::LogicalSize,
3178 233 : &tenant_id,
3179 233 : &shard_id,
3180 233 : &timeline_id,
3181 233 : );
3182 233 : let imitate_logical_size_histo = StorageTimeMetrics::new(
3183 233 : StorageTimeOperation::ImitateLogicalSize,
3184 233 : &tenant_id,
3185 233 : &shard_id,
3186 233 : &timeline_id,
3187 233 : );
3188 233 : let load_layer_map_histo = StorageTimeMetrics::new(
3189 233 : StorageTimeOperation::LoadLayerMap,
3190 233 : &tenant_id,
3191 233 : &shard_id,
3192 233 : &timeline_id,
3193 233 : );
3194 233 : let garbage_collect_histo = StorageTimeMetrics::new(
3195 233 : StorageTimeOperation::Gc,
3196 233 : &tenant_id,
3197 233 : &shard_id,
3198 233 : &timeline_id,
3199 233 : );
3200 233 : let find_gc_cutoffs_histo = StorageTimeMetrics::new(
3201 233 : StorageTimeOperation::FindGcCutoffs,
3202 233 : &tenant_id,
3203 233 : &shard_id,
3204 233 : &timeline_id,
3205 233 : );
3206 233 : let last_record_lsn_gauge = LAST_RECORD_LSN
3207 233 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3208 233 : .unwrap();
3209 233 :
3210 233 : let disk_consistent_lsn_gauge = DISK_CONSISTENT_LSN
3211 233 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3212 233 : .unwrap();
3213 233 :
3214 233 : let pitr_history_size = PITR_HISTORY_SIZE
3215 233 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3216 233 : .unwrap();
3217 233 :
3218 233 : let archival_size = TIMELINE_ARCHIVE_SIZE
3219 233 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3220 233 : .unwrap();
3221 233 :
3222 233 : let layers_per_read = LAYERS_PER_READ
3223 233 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3224 233 : .unwrap();
3225 233 :
3226 233 : let standby_horizon_gauge = STANDBY_HORIZON
3227 233 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3228 233 : .unwrap();
3229 233 : let resident_physical_size_gauge = RESIDENT_PHYSICAL_SIZE
3230 233 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3231 233 : .unwrap();
3232 233 : let visible_physical_size_gauge = VISIBLE_PHYSICAL_SIZE
3233 233 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3234 233 : .unwrap();
3235 233 : // TODO: we shouldn't expose this metric
3236 233 : let current_logical_size_gauge = CURRENT_LOGICAL_SIZE
3237 233 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3238 233 : .unwrap();
3239 233 : let aux_file_size_gauge = AUX_FILE_SIZE
3240 233 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3241 233 : .unwrap();
3242 233 : // TODO use impl Trait syntax here once we have the ability to use it: https://github.com/rust-lang/rust/issues/63065
3243 233 : let directory_entries_count_gauge_closure = {
3244 233 : let tenant_shard_id = *tenant_shard_id;
3245 233 : let timeline_id_raw = *timeline_id_raw;
3246 0 : move || {
3247 0 : let tenant_id = tenant_shard_id.tenant_id.to_string();
3248 0 : let shard_id = format!("{}", tenant_shard_id.shard_slug());
3249 0 : let timeline_id = timeline_id_raw.to_string();
3250 0 : let gauge: UIntGauge = DIRECTORY_ENTRIES_COUNT
3251 0 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3252 0 : .unwrap();
3253 0 : gauge
3254 0 : }
3255 : };
3256 233 : let directory_entries_count_gauge: Lazy<UIntGauge, Box<dyn Send + Fn() -> UIntGauge>> =
3257 233 : Lazy::new(Box::new(directory_entries_count_gauge_closure));
3258 233 : let evictions = EVICTIONS
3259 233 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3260 233 : .unwrap();
3261 233 : let evictions_with_low_residence_duration = evictions_with_low_residence_duration_builder
3262 233 : .build(&tenant_id, &shard_id, &timeline_id);
3263 233 :
3264 233 : let valid_lsn_lease_count_gauge = VALID_LSN_LEASE_COUNT
3265 233 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3266 233 : .unwrap();
3267 233 :
3268 233 : let wal_records_received = PAGESERVER_TIMELINE_WAL_RECORDS_RECEIVED
3269 233 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3270 233 : .unwrap();
3271 233 :
3272 233 : let storage_io_size = StorageIoSizeMetrics::new(&tenant_id, &shard_id, &timeline_id);
3273 233 :
3274 233 : let wait_lsn_in_progress_micros = GlobalAndPerTenantIntCounter {
3275 233 : global: WAIT_LSN_IN_PROGRESS_GLOBAL_MICROS.clone(),
3276 233 : per_tenant: WAIT_LSN_IN_PROGRESS_MICROS
3277 233 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3278 233 : .unwrap(),
3279 233 : };
3280 233 :
3281 233 : let wait_lsn_start_finish_counterpair = WAIT_LSN_START_FINISH_COUNTERPAIR
3282 233 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3283 233 : .unwrap();
3284 233 :
3285 233 : let wait_ondemand_download_time =
3286 233 : wait_ondemand_download_time::WaitOndemandDownloadTimeSum::new(
3287 233 : &tenant_id,
3288 233 : &shard_id,
3289 233 : &timeline_id,
3290 233 : );
3291 233 :
3292 233 : TimelineMetrics {
3293 233 : tenant_id,
3294 233 : shard_id,
3295 233 : timeline_id,
3296 233 : flush_time_histo,
3297 233 : flush_delay_histo,
3298 233 : compact_time_histo,
3299 233 : create_images_time_histo,
3300 233 : logical_size_histo,
3301 233 : imitate_logical_size_histo,
3302 233 : garbage_collect_histo,
3303 233 : find_gc_cutoffs_histo,
3304 233 : load_layer_map_histo,
3305 233 : last_record_lsn_gauge,
3306 233 : disk_consistent_lsn_gauge,
3307 233 : pitr_history_size,
3308 233 : archival_size,
3309 233 : layers_per_read,
3310 233 : standby_horizon_gauge,
3311 233 : resident_physical_size_gauge,
3312 233 : visible_physical_size_gauge,
3313 233 : current_logical_size_gauge,
3314 233 : aux_file_size_gauge,
3315 233 : directory_entries_count_gauge,
3316 233 : evictions,
3317 233 : evictions_with_low_residence_duration: std::sync::RwLock::new(
3318 233 : evictions_with_low_residence_duration,
3319 233 : ),
3320 233 : storage_io_size,
3321 233 : valid_lsn_lease_count_gauge,
3322 233 : wal_records_received,
3323 233 : wait_lsn_in_progress_micros,
3324 233 : wait_lsn_start_finish_counterpair,
3325 233 : wait_ondemand_download_time,
3326 233 : shutdown: std::sync::atomic::AtomicBool::default(),
3327 233 : }
3328 233 : }
3329 :
3330 793 : pub(crate) fn record_new_file_metrics(&self, sz: u64) {
3331 793 : self.resident_physical_size_add(sz);
3332 793 : }
3333 :
3334 274 : pub(crate) fn resident_physical_size_sub(&self, sz: u64) {
3335 274 : self.resident_physical_size_gauge.sub(sz);
3336 274 : crate::metrics::RESIDENT_PHYSICAL_SIZE_GLOBAL.sub(sz);
3337 274 : }
3338 :
3339 861 : pub(crate) fn resident_physical_size_add(&self, sz: u64) {
3340 861 : self.resident_physical_size_gauge.add(sz);
3341 861 : crate::metrics::RESIDENT_PHYSICAL_SIZE_GLOBAL.add(sz);
3342 861 : }
3343 :
3344 5 : pub(crate) fn resident_physical_size_get(&self) -> u64 {
3345 5 : self.resident_physical_size_gauge.get()
3346 5 : }
3347 :
3348 : /// Generates TIMELINE_LAYER labels for a persistent layer.
3349 1325 : fn make_layer_labels(&self, layer_desc: &PersistentLayerDesc) -> [&str; 5] {
3350 1325 : let level = match LayerMap::is_l0(&layer_desc.key_range, layer_desc.is_delta()) {
3351 712 : true => LayerLevel::L0,
3352 613 : false => LayerLevel::L1,
3353 : };
3354 1325 : let kind = match layer_desc.is_delta() {
3355 1095 : true => LayerKind::Delta,
3356 230 : false => LayerKind::Image,
3357 : };
3358 1325 : [
3359 1325 : &self.tenant_id,
3360 1325 : &self.shard_id,
3361 1325 : &self.timeline_id,
3362 1325 : level.into(),
3363 1325 : kind.into(),
3364 1325 : ]
3365 1325 : }
3366 :
3367 : /// Generates TIMELINE_LAYER labels for a frozen ephemeral layer.
3368 1186 : fn make_frozen_layer_labels(&self, _layer: &InMemoryLayer) -> [&str; 5] {
3369 1186 : [
3370 1186 : &self.tenant_id,
3371 1186 : &self.shard_id,
3372 1186 : &self.timeline_id,
3373 1186 : LayerLevel::Frozen.into(),
3374 1186 : LayerKind::Delta.into(), // by definition
3375 1186 : ]
3376 1186 : }
3377 :
3378 : /// Removes a frozen ephemeral layer from TIMELINE_LAYER metrics.
3379 593 : pub fn dec_frozen_layer(&self, layer: &InMemoryLayer) {
3380 593 : assert!(matches!(layer.info(), InMemoryLayerInfo::Frozen { .. }));
3381 593 : let labels = self.make_frozen_layer_labels(layer);
3382 593 : let size = layer.try_len().expect("frozen layer should have no writer");
3383 593 : TIMELINE_LAYER_COUNT
3384 593 : .get_metric_with_label_values(&labels)
3385 593 : .unwrap()
3386 593 : .dec();
3387 593 : TIMELINE_LAYER_SIZE
3388 593 : .get_metric_with_label_values(&labels)
3389 593 : .unwrap()
3390 593 : .sub(size);
3391 593 : }
3392 :
3393 : /// Adds a frozen ephemeral layer to TIMELINE_LAYER metrics.
3394 593 : pub fn inc_frozen_layer(&self, layer: &InMemoryLayer) {
3395 593 : assert!(matches!(layer.info(), InMemoryLayerInfo::Frozen { .. }));
3396 593 : let labels = self.make_frozen_layer_labels(layer);
3397 593 : let size = layer.try_len().expect("frozen layer should have no writer");
3398 593 : TIMELINE_LAYER_COUNT
3399 593 : .get_metric_with_label_values(&labels)
3400 593 : .unwrap()
3401 593 : .inc();
3402 593 : TIMELINE_LAYER_SIZE
3403 593 : .get_metric_with_label_values(&labels)
3404 593 : .unwrap()
3405 593 : .add(size);
3406 593 : }
3407 :
3408 : /// Removes a persistent layer from TIMELINE_LAYER metrics.
3409 348 : pub fn dec_layer(&self, layer_desc: &PersistentLayerDesc) {
3410 348 : let labels = self.make_layer_labels(layer_desc);
3411 348 : TIMELINE_LAYER_COUNT
3412 348 : .get_metric_with_label_values(&labels)
3413 348 : .unwrap()
3414 348 : .dec();
3415 348 : TIMELINE_LAYER_SIZE
3416 348 : .get_metric_with_label_values(&labels)
3417 348 : .unwrap()
3418 348 : .sub(layer_desc.file_size);
3419 348 : }
3420 :
3421 : /// Adds a persistent layer to TIMELINE_LAYER metrics.
3422 977 : pub fn inc_layer(&self, layer_desc: &PersistentLayerDesc) {
3423 977 : let labels = self.make_layer_labels(layer_desc);
3424 977 : TIMELINE_LAYER_COUNT
3425 977 : .get_metric_with_label_values(&labels)
3426 977 : .unwrap()
3427 977 : .inc();
3428 977 : TIMELINE_LAYER_SIZE
3429 977 : .get_metric_with_label_values(&labels)
3430 977 : .unwrap()
3431 977 : .add(layer_desc.file_size);
3432 977 : }
3433 :
3434 5 : pub(crate) fn shutdown(&self) {
3435 5 : let was_shutdown = self
3436 5 : .shutdown
3437 5 : .swap(true, std::sync::atomic::Ordering::Relaxed);
3438 5 :
3439 5 : if was_shutdown {
3440 : // this happens on tenant deletion because the tenant first shuts down its timelines,
3441 : // then invokes timeline deletion, which shuts down the timeline a second time.
3442 : // TODO: this can be removed once https://github.com/neondatabase/neon/issues/5080 is fixed
3443 0 : return;
3444 5 : }
3445 5 :
3446 5 : let tenant_id = &self.tenant_id;
3447 5 : let timeline_id = &self.timeline_id;
3448 5 : let shard_id = &self.shard_id;
3449 5 : let _ = LAST_RECORD_LSN.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3450 5 : let _ = DISK_CONSISTENT_LSN.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3451 5 : let _ = STANDBY_HORIZON.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3452 5 : {
3453 5 : RESIDENT_PHYSICAL_SIZE_GLOBAL.sub(self.resident_physical_size_get());
3454 5 : let _ = RESIDENT_PHYSICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3455 5 : }
3456 5 : let _ = VISIBLE_PHYSICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3457 5 : let _ = CURRENT_LOGICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3458 5 : if let Some(metric) = Lazy::get(&DIRECTORY_ENTRIES_COUNT) {
3459 0 : let _ = metric.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3460 5 : }
3461 :
3462 5 : let _ = TIMELINE_ARCHIVE_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3463 5 : let _ = PITR_HISTORY_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3464 :
3465 20 : for ref level in LayerLevel::iter() {
3466 45 : for ref kind in LayerKind::iter() {
3467 30 : let labels: [&str; 5] =
3468 30 : [tenant_id, shard_id, timeline_id, level.into(), kind.into()];
3469 30 : let _ = TIMELINE_LAYER_SIZE.remove_label_values(&labels);
3470 30 : let _ = TIMELINE_LAYER_COUNT.remove_label_values(&labels);
3471 30 : }
3472 : }
3473 :
3474 5 : let _ = LAYERS_PER_READ.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3475 5 :
3476 5 : let _ = EVICTIONS.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3477 5 : let _ = AUX_FILE_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3478 5 : let _ = VALID_LSN_LEASE_COUNT.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3479 5 :
3480 5 : self.evictions_with_low_residence_duration
3481 5 : .write()
3482 5 : .unwrap()
3483 5 : .remove(tenant_id, shard_id, timeline_id);
3484 :
3485 : // The following metrics are born outside of the TimelineMetrics lifecycle but still
3486 : // removed at the end of it. The idea is that the metrics outlive the
3487 : // entity they are observed for, e.g., the smgr metrics shall
3488 : // outlive an individual smgr connection, but not the timeline.
3489 :
3490 50 : for op in StorageTimeOperation::VARIANTS {
3491 45 : let _ = STORAGE_TIME_SUM_PER_TIMELINE.remove_label_values(&[
3492 45 : op,
3493 45 : tenant_id,
3494 45 : shard_id,
3495 45 : timeline_id,
3496 45 : ]);
3497 45 : let _ = STORAGE_TIME_COUNT_PER_TIMELINE.remove_label_values(&[
3498 45 : op,
3499 45 : tenant_id,
3500 45 : shard_id,
3501 45 : timeline_id,
3502 45 : ]);
3503 45 : }
3504 :
3505 15 : for op in StorageIoSizeOperation::VARIANTS {
3506 10 : let _ = STORAGE_IO_SIZE.remove_label_values(&[op, tenant_id, shard_id, timeline_id]);
3507 10 : }
3508 :
3509 : let _ =
3510 5 : WAIT_LSN_IN_PROGRESS_MICROS.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3511 5 :
3512 5 : {
3513 5 : let mut res = [Ok(()), Ok(())];
3514 5 : WAIT_LSN_START_FINISH_COUNTERPAIR
3515 5 : .remove_label_values(&mut res, &[tenant_id, shard_id, timeline_id]);
3516 5 : }
3517 5 :
3518 5 : wait_ondemand_download_time::shutdown_timeline(tenant_id, shard_id, timeline_id);
3519 5 :
3520 5 : let _ = SMGR_QUERY_STARTED_PER_TENANT_TIMELINE.remove_label_values(&[
3521 5 : SmgrQueryType::GetPageAtLsn.into(),
3522 5 : tenant_id,
3523 5 : shard_id,
3524 5 : timeline_id,
3525 5 : ]);
3526 5 : let _ = SMGR_QUERY_TIME_PER_TENANT_TIMELINE.remove_label_values(&[
3527 5 : SmgrQueryType::GetPageAtLsn.into(),
3528 5 : tenant_id,
3529 5 : shard_id,
3530 5 : timeline_id,
3531 5 : ]);
3532 5 : let _ = PAGE_SERVICE_BATCH_SIZE_PER_TENANT_TIMELINE.remove_label_values(&[
3533 5 : tenant_id,
3534 5 : shard_id,
3535 5 : timeline_id,
3536 5 : ]);
3537 5 : let _ = PAGESERVER_TIMELINE_WAL_RECORDS_RECEIVED.remove_label_values(&[
3538 5 : tenant_id,
3539 5 : shard_id,
3540 5 : timeline_id,
3541 5 : ]);
3542 5 : let _ = PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS.remove_label_values(&[
3543 5 : tenant_id,
3544 5 : shard_id,
3545 5 : timeline_id,
3546 5 : ]);
3547 5 : let _ = PAGE_SERVICE_SMGR_BATCH_WAIT_TIME.remove_label_values(&[
3548 5 : tenant_id,
3549 5 : shard_id,
3550 5 : timeline_id,
3551 5 : ]);
3552 :
3553 40 : for reason in GetPageBatchBreakReason::iter() {
3554 35 : let _ = PAGE_SERVICE_BATCH_BREAK_REASON_PER_TENANT_TIMELINE.remove_label_values(&[
3555 35 : tenant_id,
3556 35 : shard_id,
3557 35 : timeline_id,
3558 35 : reason.into(),
3559 35 : ]);
3560 35 : }
3561 5 : }
3562 : }
3563 :
3564 3 : pub(crate) fn remove_tenant_metrics(tenant_shard_id: &TenantShardId) {
3565 3 : let tid = tenant_shard_id.tenant_id.to_string();
3566 3 : let shard_id = tenant_shard_id.shard_slug().to_string();
3567 3 :
3568 3 : // Only shard zero deals in synthetic sizes
3569 3 : if tenant_shard_id.is_shard_zero() {
3570 3 : let _ = TENANT_SYNTHETIC_SIZE_METRIC.remove_label_values(&[&tid]);
3571 3 : }
3572 3 : let _ = TENANT_OFFLOADED_TIMELINES.remove_label_values(&[&tid, &shard_id]);
3573 3 :
3574 3 : tenant_throttling::remove_tenant_metrics(tenant_shard_id);
3575 3 :
3576 3 : // we leave the BROKEN_TENANTS_SET entry if any
3577 3 : }
3578 :
3579 : /// Maintain a per timeline gauge in addition to the global gauge.
3580 : pub(crate) struct PerTimelineRemotePhysicalSizeGauge {
3581 : last_set: AtomicU64,
3582 : gauge: UIntGauge,
3583 : }
3584 :
3585 : impl PerTimelineRemotePhysicalSizeGauge {
3586 238 : fn new(per_timeline_gauge: UIntGauge) -> Self {
3587 238 : Self {
3588 238 : last_set: AtomicU64::new(0),
3589 238 : gauge: per_timeline_gauge,
3590 238 : }
3591 238 : }
3592 989 : pub(crate) fn set(&self, sz: u64) {
3593 989 : self.gauge.set(sz);
3594 989 : let prev = self.last_set.swap(sz, std::sync::atomic::Ordering::Relaxed);
3595 989 : if sz < prev {
3596 20 : REMOTE_PHYSICAL_SIZE_GLOBAL.sub(prev - sz);
3597 969 : } else {
3598 969 : REMOTE_PHYSICAL_SIZE_GLOBAL.add(sz - prev);
3599 969 : };
3600 989 : }
3601 1 : pub(crate) fn get(&self) -> u64 {
3602 1 : self.gauge.get()
3603 1 : }
3604 : }
3605 :
3606 : impl Drop for PerTimelineRemotePhysicalSizeGauge {
3607 10 : fn drop(&mut self) {
3608 10 : REMOTE_PHYSICAL_SIZE_GLOBAL.sub(self.last_set.load(std::sync::atomic::Ordering::Relaxed));
3609 10 : }
3610 : }
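// A worked sketch of the delta bookkeeping above (the label values are made
// up): growing the per-timeline gauge adds only the difference to the global
// gauge, and dropping it subtracts whatever was last set.
#[cfg(test)]
#[test]
fn per_timeline_remote_physical_size_gauge_sketch() {
    let gauge = PerTimelineRemotePhysicalSizeGauge::new(
        REMOTE_PHYSICAL_SIZE
            .get_metric_with_label_values(&["tenant", "shard", "timeline"])
            .unwrap(),
    );
    let before = REMOTE_PHYSICAL_SIZE_GLOBAL.get();
    gauge.set(100); // global += 100
    gauge.set(60); // global -= 40 (the shrink delta)
    assert_eq!(REMOTE_PHYSICAL_SIZE_GLOBAL.get(), before + 60);
    drop(gauge); // global -= 60 (the last-set value)
    assert_eq!(REMOTE_PHYSICAL_SIZE_GLOBAL.get(), before);
}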
3611 :
3612 : pub(crate) struct RemoteTimelineClientMetrics {
3613 : tenant_id: String,
3614 : shard_id: String,
3615 : timeline_id: String,
3616 : pub(crate) remote_physical_size_gauge: PerTimelineRemotePhysicalSizeGauge,
3617 : calls: Mutex<HashMap<(&'static str, &'static str), IntCounterPair>>,
3618 : bytes_started_counter: Mutex<HashMap<(&'static str, &'static str), IntCounter>>,
3619 : bytes_finished_counter: Mutex<HashMap<(&'static str, &'static str), IntCounter>>,
3620 : pub(crate) projected_remote_consistent_lsn_gauge: UIntGauge,
3621 : }
3622 :
3623 : impl RemoteTimelineClientMetrics {
3624 238 : pub fn new(tenant_shard_id: &TenantShardId, timeline_id: &TimelineId) -> Self {
3625 238 : let tenant_id_str = tenant_shard_id.tenant_id.to_string();
3626 238 : let shard_id_str = format!("{}", tenant_shard_id.shard_slug());
3627 238 : let timeline_id_str = timeline_id.to_string();
3628 238 :
3629 238 : let remote_physical_size_gauge = PerTimelineRemotePhysicalSizeGauge::new(
3630 238 : REMOTE_PHYSICAL_SIZE
3631 238 : .get_metric_with_label_values(&[&tenant_id_str, &shard_id_str, &timeline_id_str])
3632 238 : .unwrap(),
3633 238 : );
3634 238 :
3635 238 : let projected_remote_consistent_lsn_gauge = PROJECTED_REMOTE_CONSISTENT_LSN
3636 238 : .get_metric_with_label_values(&[&tenant_id_str, &shard_id_str, &timeline_id_str])
3637 238 : .unwrap();
3638 238 :
3639 238 : RemoteTimelineClientMetrics {
3640 238 : tenant_id: tenant_id_str,
3641 238 : shard_id: shard_id_str,
3642 238 : timeline_id: timeline_id_str,
3643 238 : calls: Mutex::new(HashMap::default()),
3644 238 : bytes_started_counter: Mutex::new(HashMap::default()),
3645 238 : bytes_finished_counter: Mutex::new(HashMap::default()),
3646 238 : remote_physical_size_gauge,
3647 238 : projected_remote_consistent_lsn_gauge,
3648 238 : }
3649 238 : }
3650 :
3651 1638 : pub fn remote_operation_time(
3652 1638 : &self,
3653 1638 : task_kind: Option<TaskKind>,
3654 1638 : file_kind: &RemoteOpFileKind,
3655 1638 : op_kind: &RemoteOpKind,
3656 1638 : status: &'static str,
3657 1638 : ) -> Histogram {
3658 1638 : REMOTE_TIMELINE_CLIENT_COMPLETION_LATENCY
3659 1638 : .get_metric_with_label_values(&[
3660 1638 : task_kind.as_ref().map(|tk| tk.into()).unwrap_or("unknown"),
3661 1638 : file_kind.as_str(),
3662 1638 : op_kind.as_str(),
3663 1638 : status,
3664 1638 : ])
3665 1638 : .unwrap()
3666 1638 : }
3667 :
3668 3759 : fn calls_counter_pair(
3669 3759 : &self,
3670 3759 : file_kind: &RemoteOpFileKind,
3671 3759 : op_kind: &RemoteOpKind,
3672 3759 : ) -> IntCounterPair {
3673 3759 : let mut guard = self.calls.lock().unwrap();
3674 3759 : let key = (file_kind.as_str(), op_kind.as_str());
3675 3759 : let metric = guard.entry(key).or_insert_with(move || {
3676 427 : REMOTE_TIMELINE_CLIENT_CALLS
3677 427 : .get_metric_with_label_values(&[
3678 427 : &self.tenant_id,
3679 427 : &self.shard_id,
3680 427 : &self.timeline_id,
3681 427 : key.0,
3682 427 : key.1,
3683 427 : ])
3684 427 : .unwrap()
3685 3759 : });
3686 3759 : metric.clone()
3687 3759 : }
3688 :
3689 883 : fn bytes_started_counter(
3690 883 : &self,
3691 883 : file_kind: &RemoteOpFileKind,
3692 883 : op_kind: &RemoteOpKind,
3693 883 : ) -> IntCounter {
3694 883 : let mut guard = self.bytes_started_counter.lock().unwrap();
3695 883 : let key = (file_kind.as_str(), op_kind.as_str());
3696 883 : let metric = guard.entry(key).or_insert_with(move || {
3697 168 : REMOTE_TIMELINE_CLIENT_BYTES_STARTED_COUNTER
3698 168 : .get_metric_with_label_values(&[
3699 168 : &self.tenant_id,
3700 168 : &self.shard_id,
3701 168 : &self.timeline_id,
3702 168 : key.0,
3703 168 : key.1,
3704 168 : ])
3705 168 : .unwrap()
3706 883 : });
3707 883 : metric.clone()
3708 883 : }
3709 :
3710 1748 : fn bytes_finished_counter(
3711 1748 : &self,
3712 1748 : file_kind: &RemoteOpFileKind,
3713 1748 : op_kind: &RemoteOpKind,
3714 1748 : ) -> IntCounter {
3715 1748 : let mut guard = self.bytes_finished_counter.lock().unwrap();
3716 1748 : let key = (file_kind.as_str(), op_kind.as_str());
3717 1748 : let metric = guard.entry(key).or_insert_with(move || {
3718 168 : REMOTE_TIMELINE_CLIENT_BYTES_FINISHED_COUNTER
3719 168 : .get_metric_with_label_values(&[
3720 168 : &self.tenant_id,
3721 168 : &self.shard_id,
3722 168 : &self.timeline_id,
3723 168 : key.0,
3724 168 : key.1,
3725 168 : ])
3726 168 : .unwrap()
3727 1748 : });
3728 1748 : metric.clone()
3729 1748 : }
3730 : }
3731 :
3732 : #[cfg(test)]
3733 : impl RemoteTimelineClientMetrics {
3734 3 : pub fn get_bytes_started_counter_value(
3735 3 : &self,
3736 3 : file_kind: &RemoteOpFileKind,
3737 3 : op_kind: &RemoteOpKind,
3738 3 : ) -> Option<u64> {
3739 3 : let guard = self.bytes_started_counter.lock().unwrap();
3740 3 : let key = (file_kind.as_str(), op_kind.as_str());
3741 3 : guard.get(&key).map(|counter| counter.get())
3742 3 : }
3743 :
3744 3 : pub fn get_bytes_finished_counter_value(
3745 3 : &self,
3746 3 : file_kind: &RemoteOpFileKind,
3747 3 : op_kind: &RemoteOpKind,
3748 3 : ) -> Option<u64> {
3749 3 : let guard = self.bytes_finished_counter.lock().unwrap();
3750 3 : let key = (file_kind.as_str(), op_kind.as_str());
3751 3 : guard.get(&key).map(|counter| counter.get())
3752 3 : }
3753 : }
3754 :
3755 : /// See [`RemoteTimelineClientMetrics::call_begin`].
3756 : #[must_use]
3757 : pub(crate) struct RemoteTimelineClientCallMetricGuard {
3758 : /// Decremented on drop.
3759 : calls_counter_pair: Option<IntCounterPair>,
3760 : /// If Some(), this references the bytes_finished metric, and we increment it by the given `u64` on drop.
3761 : bytes_finished: Option<(IntCounter, u64)>,
3762 : }
3763 :
3764 : impl RemoteTimelineClientCallMetricGuard {
3765 : /// Consume this guard object without performing the metric updates it would do on `drop()`.
3766 : /// The caller promises to perform the metric updates manually instead.
3767 1922 : pub fn will_decrement_manually(mut self) {
3768 1922 : let RemoteTimelineClientCallMetricGuard {
3769 1922 : calls_counter_pair,
3770 1922 : bytes_finished,
3771 1922 : } = &mut self;
3772 1922 : calls_counter_pair.take();
3773 1922 : bytes_finished.take();
3774 1922 : }
3775 : }
3776 :
3777 : impl Drop for RemoteTimelineClientCallMetricGuard {
3778 1939 : fn drop(&mut self) {
3779 1939 : let RemoteTimelineClientCallMetricGuard {
3780 1939 : calls_counter_pair,
3781 1939 : bytes_finished,
3782 1939 : } = self;
3783 1939 : if let Some(guard) = calls_counter_pair.take() {
3784 17 : guard.dec();
3785 1922 : }
3786 1939 : if let Some((bytes_finished_metric, value)) = bytes_finished {
3787 0 : bytes_finished_metric.inc_by(*value);
3788 1939 : }
3789 1939 : }
3790 : }
3791 :
3792 : /// The enum variants communicate to the [`RemoteTimelineClientMetrics`] whether to
3793 : /// track the byte size of this call in applicable metric(s).
3794 : pub(crate) enum RemoteTimelineClientMetricsCallTrackSize {
3795 : /// Do not account for this call's byte size in any metrics.
3796 : /// The `reason` field is there to make the call sites self-documenting
3797 : /// about why they don't need the metric.
3798 : DontTrackSize { reason: &'static str },
3799 : /// Track the byte size of the call in applicable metric(s).
3800 : Bytes(u64),
3801 : }
3802 :
3803 : impl RemoteTimelineClientMetrics {
3804 : /// Update the metrics that change when a call to the remote timeline client instance starts.
3805 : ///
3806 : /// Drop the returned guard object once the operation is finished to updates corresponding metrics that track completions.
3807 : /// Or, use [`RemoteTimelineClientCallMetricGuard::will_decrement_manually`] and [`call_end`](Self::call_end) if that
3808 : /// is more suitable.
3809 : /// Never do both.
3810 1939 : pub(crate) fn call_begin(
3811 1939 : &self,
3812 1939 : file_kind: &RemoteOpFileKind,
3813 1939 : op_kind: &RemoteOpKind,
3814 1939 : size: RemoteTimelineClientMetricsCallTrackSize,
3815 1939 : ) -> RemoteTimelineClientCallMetricGuard {
3816 1939 : let calls_counter_pair = self.calls_counter_pair(file_kind, op_kind);
3817 1939 : calls_counter_pair.inc();
3818 :
3819 1939 : let bytes_finished = match size {
3820 1056 : RemoteTimelineClientMetricsCallTrackSize::DontTrackSize { reason: _reason } => {
3821 1056 : // nothing to do
3822 1056 : None
3823 : }
3824 883 : RemoteTimelineClientMetricsCallTrackSize::Bytes(size) => {
3825 883 : self.bytes_started_counter(file_kind, op_kind).inc_by(size);
3826 883 : let finished_counter = self.bytes_finished_counter(file_kind, op_kind);
3827 883 : Some((finished_counter, size))
3828 : }
3829 : };
3830 1939 : RemoteTimelineClientCallMetricGuard {
3831 1939 : calls_counter_pair: Some(calls_counter_pair),
3832 1939 : bytes_finished,
3833 1939 : }
3834 1939 : }
3835 :
3836 : /// Manually update the metrics that track completions, instead of using the guard object.
3837 : /// Using the guard object is generally preferable.
3838 : /// See [`call_begin`](Self::call_begin) for more context.
3839 1820 : pub(crate) fn call_end(
3840 1820 : &self,
3841 1820 : file_kind: &RemoteOpFileKind,
3842 1820 : op_kind: &RemoteOpKind,
3843 1820 : size: RemoteTimelineClientMetricsCallTrackSize,
3844 1820 : ) {
3845 1820 : let calls_counter_pair = self.calls_counter_pair(file_kind, op_kind);
3846 1820 : calls_counter_pair.dec();
3847 1820 : match size {
3848 955 : RemoteTimelineClientMetricsCallTrackSize::DontTrackSize { reason: _reason } => {}
3849 865 : RemoteTimelineClientMetricsCallTrackSize::Bytes(size) => {
3850 865 : self.bytes_finished_counter(file_kind, op_kind).inc_by(size);
3851 865 : }
3852 : }
3853 1820 : }
3854 : }
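// A usage sketch of the call-tracking discipline (the Layer/Upload variants
// and byte count are assumed picks for illustration): either let the guard
// record completion on drop, or opt out with `will_decrement_manually` and
// pair it with an explicit `call_end` once the operation truly finishes.
#[cfg(test)]
#[allow(dead_code)]
fn call_tracking_sketch(metrics: &RemoteTimelineClientMetrics) {
    let guard = metrics.call_begin(
        &RemoteOpFileKind::Layer,
        &RemoteOpKind::Upload,
        RemoteTimelineClientMetricsCallTrackSize::Bytes(4096),
    );
    guard.will_decrement_manually();
    // ... later, when the upload has actually completed:
    metrics.call_end(
        &RemoteOpFileKind::Layer,
        &RemoteOpKind::Upload,
        RemoteTimelineClientMetricsCallTrackSize::Bytes(4096),
    );
}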
3855 :
3856 : impl Drop for RemoteTimelineClientMetrics {
3857 10 : fn drop(&mut self) {
3858 10 : let RemoteTimelineClientMetrics {
3859 10 : tenant_id,
3860 10 : shard_id,
3861 10 : timeline_id,
3862 10 : remote_physical_size_gauge,
3863 10 : calls,
3864 10 : bytes_started_counter,
3865 10 : bytes_finished_counter,
3866 10 : projected_remote_consistent_lsn_gauge,
3867 10 : } = self;
3868 12 : for ((a, b), _) in calls.get_mut().unwrap().drain() {
3869 12 : let mut res = [Ok(()), Ok(())];
3870 12 : REMOTE_TIMELINE_CLIENT_CALLS
3871 12 : .remove_label_values(&mut res, &[tenant_id, shard_id, timeline_id, a, b]);
3872 12 : // don't care about results
3873 12 : }
3874 10 : for ((a, b), _) in bytes_started_counter.get_mut().unwrap().drain() {
3875 3 : let _ = REMOTE_TIMELINE_CLIENT_BYTES_STARTED_COUNTER.remove_label_values(&[
3876 3 : tenant_id,
3877 3 : shard_id,
3878 3 : timeline_id,
3879 3 : a,
3880 3 : b,
3881 3 : ]);
3882 3 : }
3883 10 : for ((a, b), _) in bytes_finished_counter.get_mut().unwrap().drain() {
3884 3 : let _ = REMOTE_TIMELINE_CLIENT_BYTES_FINISHED_COUNTER.remove_label_values(&[
3885 3 : tenant_id,
3886 3 : shard_id,
3887 3 : timeline_id,
3888 3 : a,
3889 3 : b,
3890 3 : ]);
3891 3 : }
3892 10 : {
3893 10 : let _ = remote_physical_size_gauge; // use to avoid 'unused' warning in desctructuring above
3894 10 : let _ = REMOTE_PHYSICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3895 10 : }
3896 10 : {
3897 10 : let _ = projected_remote_consistent_lsn_gauge;
3898 10 : let _ = PROJECTED_REMOTE_CONSISTENT_LSN.remove_label_values(&[
3899 10 : tenant_id,
3900 10 : shard_id,
3901 10 : timeline_id,
3902 10 : ]);
3903 10 : }
3904 10 : }
3905 : }
3906 :
3907 : /// Wrapper future that measures the time spent by a remote storage operation,
3908 : /// and records the time and success/failure as a prometheus metric.
3909 : pub(crate) trait MeasureRemoteOp<O, E>: Sized + Future<Output = Result<O, E>> {
3910 1653 : async fn measure_remote_op(
3911 1653 : self,
3912 1653 : task_kind: Option<TaskKind>, // not all caller contexts have a RequestContext / TaskKind handy
3913 1653 : file_kind: RemoteOpFileKind,
3914 1653 : op: RemoteOpKind,
3915 1653 : metrics: Arc<RemoteTimelineClientMetrics>,
3916 1653 : ) -> Result<O, E> {
3917 1653 : let start = Instant::now();
3918 1653 : let res = self.await;
3919 1638 : let duration = start.elapsed();
3920 1638 : let status = if res.is_ok() { &"success" } else { &"failure" };
3921 1638 : metrics
3922 1638 : .remote_operation_time(task_kind, &file_kind, &op, status)
3923 1638 : .observe(duration.as_secs_f64());
3924 1638 : res
3925 1638 : }
3926 : }
3927 :
3928 : impl<Fut, O, E> MeasureRemoteOp<O, E> for Fut where Fut: Sized + Future<Output = Result<O, E>> {}
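// Sketch: the blanket impl above makes `measure_remote_op` available on any
// `Future<Output = Result<_, _>>`; the file/op kinds below are arbitrary
// picks for illustration.
#[cfg(test)]
#[allow(dead_code)]
async fn measure_remote_op_sketch(
    metrics: Arc<RemoteTimelineClientMetrics>,
) -> Result<(), std::io::Error> {
    async { Ok(()) }
        .measure_remote_op(None, RemoteOpFileKind::Layer, RemoteOpKind::Upload, metrics)
        .await
}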
3929 :
3930 : pub mod tokio_epoll_uring {
3931 : use std::collections::HashMap;
3932 : use std::sync::{Arc, Mutex};
3933 :
3934 : use metrics::{Histogram, LocalHistogram, UIntGauge, register_histogram, register_int_counter};
3935 : use once_cell::sync::Lazy;
3936 :
3937 : /// Shared storage for tokio-epoll-uring thread local metrics.
3938 : pub(crate) static THREAD_LOCAL_METRICS_STORAGE: Lazy<ThreadLocalMetricsStorage> =
3939 120 : Lazy::new(|| {
3940 120 : let slots_submission_queue_depth = register_histogram!(
3941 120 : "pageserver_tokio_epoll_uring_slots_submission_queue_depth",
3942 120 : "The slots waiters queue depth of each tokio_epoll_uring system",
3943 120 : vec![
3944 120 : 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0
3945 120 : ],
3946 120 : )
3947 120 : .expect("failed to define a metric");
3948 120 : ThreadLocalMetricsStorage {
3949 120 : observers: Mutex::new(HashMap::new()),
3950 120 : slots_submission_queue_depth,
3951 120 : }
3952 120 : });
3953 :
3954 : pub struct ThreadLocalMetricsStorage {
3955 : /// List of thread local metrics observers.
3956 : observers: Mutex<HashMap<u64, Arc<ThreadLocalMetrics>>>,
3957 : /// A histogram shared between all thread local systems
3958 : /// for collecting slots submission queue depth.
3959 : slots_submission_queue_depth: Histogram,
3960 : }
3961 :
3962 : /// Each thread-local [`tokio_epoll_uring::System`] gets one of these as its
3963 : /// [`tokio_epoll_uring::metrics::PerSystemMetrics`] generic.
3964 : ///
3965 : /// The System makes observations into [`Self`] and periodically, the collector
3966 : /// comes along and flushes [`Self`] into the shared storage [`THREAD_LOCAL_METRICS_STORAGE`].
3967 : ///
3968 : /// [`LocalHistogram`] is `!Send`, so, we need to put it behind a [`Mutex`].
3969 : /// But except for the periodic flush, the lock is uncontended so there's no waiting
3970 : /// for cache coherence protocol to get an exclusive cache line.
3971 : pub struct ThreadLocalMetrics {
3972 : /// Local observer of thread local tokio-epoll-uring system's slots waiters queue depth.
3973 : slots_submission_queue_depth: Mutex<LocalHistogram>,
3974 : }
3975 :
3976 : impl ThreadLocalMetricsStorage {
3977 : /// Registers a new thread local system. Returns a thread local metrics observer.
3978 524 : pub fn register_system(&self, id: u64) -> Arc<ThreadLocalMetrics> {
3979 524 : let per_system_metrics = Arc::new(ThreadLocalMetrics::new(
3980 524 : self.slots_submission_queue_depth.local(),
3981 524 : ));
3982 524 : let mut g = self.observers.lock().unwrap();
3983 524 : g.insert(id, Arc::clone(&per_system_metrics));
3984 524 : per_system_metrics
3985 524 : }
3986 :
3987 : /// Removes metrics observer for a thread local system.
3988 : /// This should be called before dropping a thread local system.
3989 120 : pub fn remove_system(&self, id: u64) {
3990 120 : let mut g = self.observers.lock().unwrap();
3991 120 : g.remove(&id);
3992 120 : }
3993 :
3994 : /// Flush all thread local metrics to the shared storage.
3995 0 : pub fn flush_thread_local_metrics(&self) {
3996 0 : let g = self.observers.lock().unwrap();
3997 0 : g.values().for_each(|local| {
3998 0 : local.flush();
3999 0 : });
4000 0 : }
4001 : }
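    // Lifecycle sketch (the id is arbitrary): each thread-local system
    // registers an observer at startup, the global collector periodically
    // flushes every observer into the shared histogram, and the system
    // deregisters its observer before it is dropped.
    #[cfg(test)]
    #[test]
    fn thread_local_metrics_lifecycle_sketch() {
        let observer = THREAD_LOCAL_METRICS_STORAGE.register_system(42);
        observer.flush(); // normally driven by flush_thread_local_metrics()
        THREAD_LOCAL_METRICS_STORAGE.remove_system(42);
    }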
4002 :
4003 : impl ThreadLocalMetrics {
4004 524 : pub fn new(slots_submission_queue_depth: LocalHistogram) -> Self {
4005 524 : ThreadLocalMetrics {
4006 524 : slots_submission_queue_depth: Mutex::new(slots_submission_queue_depth),
4007 524 : }
4008 524 : }
4009 :
4010 : /// Flushes the thread local metrics to shared aggregator.
4011 0 : pub fn flush(&self) {
4012 0 : let Self {
4013 0 : slots_submission_queue_depth,
4014 0 : } = self;
4015 0 : slots_submission_queue_depth.lock().unwrap().flush();
4016 0 : }
4017 : }
4018 :
4019 : impl tokio_epoll_uring::metrics::PerSystemMetrics for ThreadLocalMetrics {
4020 394354 : fn observe_slots_submission_queue_depth(&self, queue_depth: u64) {
4021 394354 : let Self {
4022 394354 : slots_submission_queue_depth,
4023 394354 : } = self;
4024 394354 : slots_submission_queue_depth
4025 394354 : .lock()
4026 394354 : .unwrap()
4027 394354 : .observe(queue_depth as f64);
4028 394354 : }
4029 : }
4030 :
4031 : pub struct Collector {
4032 : descs: Vec<metrics::core::Desc>,
4033 : systems_created: UIntGauge,
4034 : systems_destroyed: UIntGauge,
4035 : thread_local_metrics_storage: &'static ThreadLocalMetricsStorage,
4036 : }
4037 :
4038 : impl metrics::core::Collector for Collector {
4039 0 : fn desc(&self) -> Vec<&metrics::core::Desc> {
4040 0 : self.descs.iter().collect()
4041 0 : }
4042 :
4043 0 : fn collect(&self) -> Vec<metrics::proto::MetricFamily> {
4044 0 : let mut mfs = Vec::with_capacity(Self::NMETRICS);
4045 0 : let tokio_epoll_uring::metrics::GlobalMetrics {
4046 0 : systems_created,
4047 0 : systems_destroyed,
4048 0 : } = tokio_epoll_uring::metrics::global();
4049 0 : self.systems_created.set(systems_created);
4050 0 : mfs.extend(self.systems_created.collect());
4051 0 : self.systems_destroyed.set(systems_destroyed);
4052 0 : mfs.extend(self.systems_destroyed.collect());
4053 0 :
4054 0 : self.thread_local_metrics_storage
4055 0 : .flush_thread_local_metrics();
4056 0 :
4057 0 : mfs.extend(
4058 0 : self.thread_local_metrics_storage
4059 0 : .slots_submission_queue_depth
4060 0 : .collect(),
4061 0 : );
4062 0 : mfs
4063 0 : }
4064 : }
4065 :
4066 : impl Collector {
4067 : const NMETRICS: usize = 3;
4068 :
4069 : #[allow(clippy::new_without_default)]
4070 0 : pub fn new() -> Self {
4071 0 : let mut descs = Vec::new();
4072 0 :
4073 0 : let systems_created = UIntGauge::new(
4074 0 : "pageserver_tokio_epoll_uring_systems_created",
4075 0 : "counter of tokio-epoll-uring systems that were created",
4076 0 : )
4077 0 : .unwrap();
4078 0 : descs.extend(
4079 0 : metrics::core::Collector::desc(&systems_created)
4080 0 : .into_iter()
4081 0 : .cloned(),
4082 0 : );
4083 0 :
4084 0 : let systems_destroyed = UIntGauge::new(
4085 0 : "pageserver_tokio_epoll_uring_systems_destroyed",
4086 0 : "counter of tokio-epoll-uring systems that were destroyed",
4087 0 : )
4088 0 : .unwrap();
4089 0 : descs.extend(
4090 0 : metrics::core::Collector::desc(&systems_destroyed)
4091 0 : .into_iter()
4092 0 : .cloned(),
4093 0 : );
4094 0 :
4095 0 : Self {
4096 0 : descs,
4097 0 : systems_created,
4098 0 : systems_destroyed,
4099 0 : thread_local_metrics_storage: &THREAD_LOCAL_METRICS_STORAGE,
4100 0 : }
4101 0 : }
4102 : }
4103 :
4104 120 : pub(crate) static THREAD_LOCAL_LAUNCH_SUCCESSES: Lazy<metrics::IntCounter> = Lazy::new(|| {
4105 120 : register_int_counter!(
4106 120 : "pageserver_tokio_epoll_uring_pageserver_thread_local_launch_success_count",
4107 120 : "Number of times where thread_local_system creation spanned multiple executor threads",
4108 120 : )
4109 120 : .unwrap()
4110 120 : });
4111 :
4112 0 : pub(crate) static THREAD_LOCAL_LAUNCH_FAILURES: Lazy<metrics::IntCounter> = Lazy::new(|| {
4113 0 : register_int_counter!(
4114 0 : "pageserver_tokio_epoll_uring_pageserver_thread_local_launch_failures_count",
4115 0 : "Number of times thread_local_system creation failed and was retried after back-off.",
4116 0 : )
4117 0 : .unwrap()
4118 0 : });
4119 : }
4120 :
4121 : pub(crate) struct GlobalAndPerTenantIntCounter {
4122 : global: IntCounter,
4123 : per_tenant: IntCounter,
4124 : }
4125 :
4126 : impl GlobalAndPerTenantIntCounter {
4127 : #[inline(always)]
4128 0 : pub(crate) fn inc(&self) {
4129 0 : self.inc_by(1)
4130 0 : }
4131 : #[inline(always)]
4132 113660 : pub(crate) fn inc_by(&self, n: u64) {
4133 113660 : self.global.inc_by(n);
4134 113660 : self.per_tenant.inc_by(n);
4135 113660 : }
4136 : }
4137 :
4138 : pub(crate) mod tenant_throttling {
4139 : use metrics::register_int_counter_vec;
4140 : use once_cell::sync::Lazy;
4141 : use utils::shard::TenantShardId;
4142 :
4143 : use super::GlobalAndPerTenantIntCounter;
4144 :
4145 : pub(crate) struct Metrics<const KIND: usize> {
4146 : pub(super) count_accounted_start: GlobalAndPerTenantIntCounter,
4147 : pub(super) count_accounted_finish: GlobalAndPerTenantIntCounter,
4148 : pub(super) wait_time: GlobalAndPerTenantIntCounter,
4149 : pub(super) count_throttled: GlobalAndPerTenantIntCounter,
4150 : }
4151 :
4152 108 : static COUNT_ACCOUNTED_START: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
4153 108 : register_int_counter_vec!(
4154 108 : "pageserver_tenant_throttling_count_accounted_start_global",
4155 108 : "Count of tenant throttling starts, by kind of throttle.",
4156 108 : &["kind"]
4157 108 : )
4158 108 : .unwrap()
4159 108 : });
4160 108 : static COUNT_ACCOUNTED_START_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
4161 108 : register_int_counter_vec!(
4162 108 : "pageserver_tenant_throttling_count_accounted_start",
4163 108 : "Count of tenant throttling starts, by kind of throttle.",
4164 108 : &["kind", "tenant_id", "shard_id"]
4165 108 : )
4166 108 : .unwrap()
4167 108 : });
4168 108 : static COUNT_ACCOUNTED_FINISH: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
4169 108 : register_int_counter_vec!(
4170 108 : "pageserver_tenant_throttling_count_accounted_finish_global",
4171 108 : "Count of tenant throttling finishes, by kind of throttle.",
4172 108 : &["kind"]
4173 108 : )
4174 108 : .unwrap()
4175 108 : });
4176 108 : static COUNT_ACCOUNTED_FINISH_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
4177 108 : register_int_counter_vec!(
4178 108 : "pageserver_tenant_throttling_count_accounted_finish",
4179 108 : "Count of tenant throttling finishes, by kind of throttle.",
4180 108 : &["kind", "tenant_id", "shard_id"]
4181 108 : )
4182 108 : .unwrap()
4183 108 : });
4184 108 : static WAIT_USECS: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
4185 108 : register_int_counter_vec!(
4186 108 : "pageserver_tenant_throttling_wait_usecs_sum_global",
4187 108 : "Sum of microseconds that spent waiting throttle by kind of throttle.",
4188 108 : &["kind"]
4189 108 : )
4190 108 : .unwrap()
4191 108 : });
4192 108 : static WAIT_USECS_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
4193 108 : register_int_counter_vec!(
4194 108 : "pageserver_tenant_throttling_wait_usecs_sum",
4195 108 : "Sum of microseconds that spent waiting throttle by kind of throttle.",
4196 108 : &["kind", "tenant_id", "shard_id"]
4197 108 : )
4198 108 : .unwrap()
4199 108 : });
4200 :
4201 108 : static WAIT_COUNT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
4202 108 : register_int_counter_vec!(
4203 108 : "pageserver_tenant_throttling_count_global",
4204 108 : "Count of tenant throttlings, by kind of throttle.",
4205 108 : &["kind"]
4206 108 : )
4207 108 : .unwrap()
4208 108 : });
4209 108 : static WAIT_COUNT_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
4210 108 : register_int_counter_vec!(
4211 108 : "pageserver_tenant_throttling_count",
4212 108 : "Count of tenant throttlings, by kind of throttle.",
4213 108 : &["kind", "tenant_id", "shard_id"]
4214 108 : )
4215 108 : .unwrap()
4216 108 : });
4217 :
4218 : const KINDS: &[&str] = &["pagestream"];
4219 : pub type Pagestream = Metrics<0>;
4220 :
4221 : impl<const KIND: usize> Metrics<KIND> {
4222 117 : pub(crate) fn new(tenant_shard_id: &TenantShardId) -> Self {
4223 117 : let per_tenant_label_values = &[
4224 117 : KINDS[KIND],
4225 117 : &tenant_shard_id.tenant_id.to_string(),
4226 117 : &tenant_shard_id.shard_slug().to_string(),
4227 117 : ];
4228 117 : Metrics {
4229 117 : count_accounted_start: {
4230 117 : GlobalAndPerTenantIntCounter {
4231 117 : global: COUNT_ACCOUNTED_START.with_label_values(&[KINDS[KIND]]),
4232 117 : per_tenant: COUNT_ACCOUNTED_START_PER_TENANT
4233 117 : .with_label_values(per_tenant_label_values),
4234 117 : }
4235 117 : },
4236 117 : count_accounted_finish: {
4237 117 : GlobalAndPerTenantIntCounter {
4238 117 : global: COUNT_ACCOUNTED_FINISH.with_label_values(&[KINDS[KIND]]),
4239 117 : per_tenant: COUNT_ACCOUNTED_FINISH_PER_TENANT
4240 117 : .with_label_values(per_tenant_label_values),
4241 117 : }
4242 117 : },
4243 117 : wait_time: {
4244 117 : GlobalAndPerTenantIntCounter {
4245 117 : global: WAIT_USECS.with_label_values(&[KINDS[KIND]]),
4246 117 : per_tenant: WAIT_USECS_PER_TENANT
4247 117 : .with_label_values(per_tenant_label_values),
4248 117 : }
4249 117 : },
4250 117 : count_throttled: {
4251 117 : GlobalAndPerTenantIntCounter {
4252 117 : global: WAIT_COUNT.with_label_values(&[KINDS[KIND]]),
4253 117 : per_tenant: WAIT_COUNT_PER_TENANT
4254 117 : .with_label_values(per_tenant_label_values),
4255 117 : }
4256 117 : },
4257 117 : }
4258 117 : }
4259 : }
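    // Note: the const generic `KIND` indexes into `KINDS` at construction
    // time, so `Pagestream` (= `Metrics<0>`) always carries the "pagestream"
    // label; adding a new throttle kind means appending to `KINDS` and
    // defining a new type alias for the next index.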
4260 :
4261 0 : pub(crate) fn preinitialize_global_metrics() {
4262 0 : Lazy::force(&COUNT_ACCOUNTED_START);
4263 0 : Lazy::force(&COUNT_ACCOUNTED_FINISH);
4264 0 : Lazy::force(&WAIT_USECS);
4265 0 : Lazy::force(&WAIT_COUNT);
4266 0 : }
4267 :
4268 3 : pub(crate) fn remove_tenant_metrics(tenant_shard_id: &TenantShardId) {
4269 12 : for m in &[
4270 3 : &COUNT_ACCOUNTED_START_PER_TENANT,
4271 3 : &COUNT_ACCOUNTED_FINISH_PER_TENANT,
4272 3 : &WAIT_USECS_PER_TENANT,
4273 3 : &WAIT_COUNT_PER_TENANT,
4274 3 : ] {
4275 24 : for kind in KINDS {
4276 12 : let _ = m.remove_label_values(&[
4277 12 : kind,
4278 12 : &tenant_shard_id.tenant_id.to_string(),
4279 12 : &tenant_shard_id.shard_slug().to_string(),
4280 12 : ]);
4281 12 : }
4282 : }
4283 3 : }
4284 : }
4285 :
4286 : pub(crate) mod disk_usage_based_eviction {
4287 : use super::*;
4288 :
4289 : pub(crate) struct Metrics {
4290 : pub(crate) tenant_collection_time: Histogram,
4291 : pub(crate) tenant_layer_count: Histogram,
4292 : pub(crate) layers_collected: IntCounter,
4293 : pub(crate) layers_selected: IntCounter,
4294 : pub(crate) layers_evicted: IntCounter,
4295 : }
4296 :
4297 : impl Default for Metrics {
4298 0 : fn default() -> Self {
4299 0 : let tenant_collection_time = register_histogram!(
4300 0 : "pageserver_disk_usage_based_eviction_tenant_collection_seconds",
4301 0 : "Time spent collecting layers from a tenant -- not normalized by collected layer amount",
4302 0 : vec![0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1.0, 5.0, 10.0]
4303 0 : )
4304 0 : .unwrap();
4305 0 :
4306 0 : let tenant_layer_count = register_histogram!(
4307 0 : "pageserver_disk_usage_based_eviction_tenant_collected_layers",
4308 0 : "Amount of layers gathered from a tenant",
4309 0 : vec![5.0, 50.0, 500.0, 5000.0, 50000.0]
4310 0 : )
4311 0 : .unwrap();
4312 0 :
4313 0 : let layers_collected = register_int_counter!(
4314 0 : "pageserver_disk_usage_based_eviction_collected_layers_total",
4315 0 : "Amount of layers collected"
4316 0 : )
4317 0 : .unwrap();
4318 0 :
4319 0 : let layers_selected = register_int_counter!(
4320 0 : "pageserver_disk_usage_based_eviction_select_layers_total",
4321 0 : "Amount of layers selected"
4322 0 : )
4323 0 : .unwrap();
4324 0 :
4325 0 : let layers_evicted = register_int_counter!(
4326 0 : "pageserver_disk_usage_based_eviction_evicted_layers_total",
4327 0 : "Amount of layers successfully evicted"
4328 0 : )
4329 0 : .unwrap();
4330 0 :
4331 0 : Self {
4332 0 : tenant_collection_time,
4333 0 : tenant_layer_count,
4334 0 : layers_collected,
4335 0 : layers_selected,
4336 0 : layers_evicted,
4337 0 : }
4338 0 : }
4339 : }
4340 :
4341 : pub(crate) static METRICS: Lazy<Metrics> = Lazy::new(Metrics::default);
4342 : }
4343 :
4344 105 : static TOKIO_EXECUTOR_THREAD_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
4345 105 : register_uint_gauge_vec!(
4346 105 : "pageserver_tokio_executor_thread_configured_count",
4347 105 : "Total number of configued tokio executor threads in the process.
4348 105 : The `setup` label denotes whether we're running with multiple runtimes or a single runtime.",
4349 105 : &["setup"],
4350 105 : )
4351 105 : .unwrap()
4352 105 : });
4353 :
4354 105 : pub(crate) fn set_tokio_runtime_setup(setup: &str, num_threads: NonZeroUsize) {
4355 : static SERIALIZE: std::sync::Mutex<()> = std::sync::Mutex::new(());
4356 105 : let _guard = SERIALIZE.lock().unwrap();
4357 105 : TOKIO_EXECUTOR_THREAD_COUNT.reset();
4358 105 : TOKIO_EXECUTOR_THREAD_COUNT
4359 105 : .get_metric_with_label_values(&[setup])
4360 105 : .unwrap()
4361 105 : .set(u64::try_from(num_threads.get()).unwrap());
4362 105 : }
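// Usage sketch (the setup label is free-form; "multiple-runtimes" is a
// hypothetical value): resetting before setting keeps exactly one labelled
// time series alive across reconfigurations.
#[cfg(test)]
#[test]
fn tokio_runtime_setup_sketch() {
    set_tokio_runtime_setup("multiple-runtimes", NonZeroUsize::new(8).unwrap());
}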
4363 :
4364 0 : pub(crate) static BASEBACKUP_CACHE_READ: Lazy<IntCounterVec> = Lazy::new(|| {
4365 0 : register_int_counter_vec!(
4366 0 : "pageserver_basebackup_cache_read_total",
4367 0 : "Number of read accesses to the basebackup cache grouped by hit/miss/error",
4368 0 : &["result"]
4369 0 : )
4370 0 : .expect("failed to define a metric")
4371 0 : });
4372 :
4373 0 : pub(crate) static BASEBACKUP_CACHE_PREPARE: Lazy<IntCounterVec> = Lazy::new(|| {
4374 0 : register_int_counter_vec!(
4375 0 : "pageserver_basebackup_cache_prepare_total",
4376 0 : "Number of prepare requests processed by the basebackup cache grouped by ok/skip/error",
4377 0 : &["result"]
4378 0 : )
4379 0 : .expect("failed to define a metric")
4380 0 : });
4381 :
4382 0 : pub(crate) static BASEBACKUP_CACHE_ENTRIES: Lazy<IntGauge> = Lazy::new(|| {
4383 0 : register_int_gauge!(
4384 0 : "pageserver_basebackup_cache_entries_total",
4385 0 : "Number of entries in the basebackup cache"
4386 0 : )
4387 0 : .expect("failed to define a metric")
4388 0 : });
4389 :
4390 : // FIXME: Support basebackup cache size metrics.
4391 : #[allow(dead_code)]
4392 0 : pub(crate) static BASEBACKUP_CACHE_SIZE: Lazy<IntGauge> = Lazy::new(|| {
4393 0 : register_int_gauge!(
4394 0 : "pageserver_basebackup_cache_size_bytes",
4395 0 : "Total size of all basebackup cache entries on disk in bytes"
4396 0 : )
4397 0 : .expect("failed to define a metric")
4398 0 : });
4399 :
4400 0 : static PAGESERVER_CONFIG_IGNORED_ITEMS: Lazy<UIntGaugeVec> = Lazy::new(|| {
4401 0 : register_uint_gauge_vec!(
4402 0 : "pageserver_config_ignored_items",
4403 0 : "TOML items present in the on-disk configuration file but ignored by the pageserver config parser.\
4404 0 : The `item` label is the dot-separated path of the ignored item in the on-disk configuration file.\
4405 0 : The value for an unknown config item is always 1.\
4406 0 : There is a special label value \"\", which is 0, so that there is always a metric exposed (simplifies dashboards).",
4407 0 : &["item"]
4408 0 : )
4409 0 : .unwrap()
4410 0 : });
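                   : // Example scrape output (the item path is hypothetical; the "" series is the
                   : // always-present sentinel described in the help text):
                   : //
                   : //     pageserver_config_ignored_items{item=""} 0
                   : //     pageserver_config_ignored_items{item="tenant_config.unknown_knob"} 1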
4411 :
4412 0 : pub fn preinitialize_metrics(
4413 0 : conf: &'static PageServerConf,
4414 0 : ignored: config::ignored_fields::Paths,
4415 0 : ) {
4416 0 : set_page_service_config_max_batch_size(&conf.page_service_pipelining);
4417 0 :
4418 0 : PAGESERVER_CONFIG_IGNORED_ITEMS
4419 0 : .with_label_values(&[""])
4420 0 : .set(0);
4421 0 : for path in &ignored.paths {
4422 0 : PAGESERVER_CONFIG_IGNORED_ITEMS
4423 0 : .with_label_values(&[path])
4424 0 : .set(1);
4425 0 : }
4426 :
4427                :     // Python tests need these, and we alert on some of them.
4428 : //
4429                :     // FIXME(4813): make it so that we have no top-level metrics, as this fn easily falls
4430                :     // out of sync with them:
4431 : // - global metrics reside in a Lazy<PageserverMetrics>
4432 : // - access via crate::metrics::PS_METRICS.some_metric.inc()
4433 : // - could move the statics into TimelineMetrics::new()?
4434 :
4435 : // counters
4436 0 : [
4437 0 : &UNEXPECTED_ONDEMAND_DOWNLOADS,
4438 0 : &WALRECEIVER_STARTED_CONNECTIONS,
4439 0 : &WALRECEIVER_BROKER_UPDATES,
4440 0 : &WALRECEIVER_CANDIDATES_ADDED,
4441 0 : &WALRECEIVER_CANDIDATES_REMOVED,
4442 0 : &tokio_epoll_uring::THREAD_LOCAL_LAUNCH_FAILURES,
4443 0 : &tokio_epoll_uring::THREAD_LOCAL_LAUNCH_SUCCESSES,
4444 0 : &REMOTE_ONDEMAND_DOWNLOADED_LAYERS,
4445 0 : &REMOTE_ONDEMAND_DOWNLOADED_BYTES,
4446 0 : &CIRCUIT_BREAKERS_BROKEN,
4447 0 : &CIRCUIT_BREAKERS_UNBROKEN,
4448 0 : &PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS_GLOBAL,
4449 0 : &WAIT_LSN_IN_PROGRESS_GLOBAL_MICROS,
4450 0 : ]
4451 0 : .into_iter()
4452 0 : .for_each(|c| {
4453 0 : Lazy::force(c);
4454 0 : });
4455 0 :
4456 0 : // Deletion queue stats
4457 0 : Lazy::force(&DELETION_QUEUE);
4458 0 :
4459 0 : // Tenant stats
4460 0 : Lazy::force(&TENANT);
4461 0 :
4462 0 : // Tenant manager stats
4463 0 : Lazy::force(&TENANT_MANAGER);
4464 0 :
4465 0 : Lazy::force(&crate::tenant::storage_layer::layer::LAYER_IMPL_METRICS);
4466 0 : Lazy::force(&disk_usage_based_eviction::METRICS);
4467 :
4468 0 : for state_name in pageserver_api::models::TenantState::VARIANTS {
4469              0 :         // Initialize the gauge for every state; otherwise the time series might appear to
4470              0 :         // carry over values from the last restart.
4471 0 : TENANT_STATE_METRIC.with_label_values(&[state_name]).set(0);
4472 0 : }
4473 :
4474 : // countervecs
4475 0 : [
4476 0 : &BACKGROUND_LOOP_PERIOD_OVERRUN_COUNT,
4477 0 : &SMGR_QUERY_STARTED_GLOBAL,
4478 0 : &PAGE_SERVICE_BATCH_BREAK_REASON_GLOBAL,
4479 0 : ]
4480 0 : .into_iter()
4481 0 : .for_each(|c| {
4482 0 : Lazy::force(c);
4483 0 : });
4484 0 :
4485 0 : // gauges
4486 0 : WALRECEIVER_ACTIVE_MANAGERS.get();
4487 0 :
4488 0 : // histograms
4489 0 : [
4490 0 : &LAYERS_PER_READ_GLOBAL,
4491 0 : &LAYERS_PER_READ_BATCH_GLOBAL,
4492 0 : &LAYERS_PER_READ_AMORTIZED_GLOBAL,
4493 0 : &DELTAS_PER_READ_GLOBAL,
4494 0 : &WAIT_LSN_TIME,
4495 0 : &WAL_REDO_TIME,
4496 0 : &WAL_REDO_RECORDS_HISTOGRAM,
4497 0 : &WAL_REDO_BYTES_HISTOGRAM,
4498 0 : &WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM,
4499 0 : &PAGE_SERVICE_BATCH_SIZE_GLOBAL,
4500 0 : &PAGE_SERVICE_SMGR_BATCH_WAIT_TIME_GLOBAL,
4501 0 : ]
4502 0 : .into_iter()
4503 0 : .for_each(|h| {
4504 0 : Lazy::force(h);
4505 0 : });
4506 0 :
4507 0 : // Custom
4508 0 : Lazy::force(&BASEBACKUP_QUERY_TIME);
4509 0 : Lazy::force(&COMPUTE_COMMANDS_COUNTERS);
4510 0 : Lazy::force(&tokio_epoll_uring::THREAD_LOCAL_METRICS_STORAGE);
4511 0 :
4512 0 : tenant_throttling::preinitialize_global_metrics();
4513 0 : wait_ondemand_download_time::preinitialize_global_metrics();
4514 0 : }