Line data Source code
1 : use std::collections::HashMap;
2 : use std::num::NonZeroUsize;
3 : use std::os::fd::RawFd;
4 : use std::sync::atomic::AtomicU64;
5 : use std::sync::{Arc, Mutex};
6 : use std::time::{Duration, Instant};
7 :
8 : use enum_map::{Enum as _, EnumMap};
9 : use futures::Future;
10 : use metrics::{
11 : Counter, CounterVec, GaugeVec, Histogram, HistogramVec, IntCounter, IntCounterPair,
12 : IntCounterPairVec, IntCounterVec, IntGauge, IntGaugeVec, UIntGauge, UIntGaugeVec,
13 : register_counter_vec, register_gauge_vec, register_histogram, register_histogram_vec,
14 : register_int_counter, register_int_counter_pair_vec, register_int_counter_vec,
15 : register_int_gauge, register_int_gauge_vec, register_uint_gauge, register_uint_gauge_vec,
16 : };
17 : use once_cell::sync::Lazy;
18 : use pageserver_api::config::{
19 : PageServicePipeliningConfig, PageServicePipeliningConfigPipelined,
20 : PageServiceProtocolPipelinedBatchingStrategy, PageServiceProtocolPipelinedExecutionStrategy,
21 : };
22 : use pageserver_api::models::InMemoryLayerInfo;
23 : use pageserver_api::shard::TenantShardId;
24 : use postgres_backend::{QueryError, is_expected_io_error};
25 : use pq_proto::framed::ConnectionError;
26 : use strum::{EnumCount, IntoEnumIterator as _, VariantNames};
27 : use strum_macros::{IntoStaticStr, VariantNames};
28 : use utils::id::TimelineId;
29 :
30 : use crate::config;
31 : use crate::config::PageServerConf;
32 : use crate::context::{PageContentKind, RequestContext};
33 : use crate::pgdatadir_mapping::DatadirModificationStats;
34 : use crate::task_mgr::TaskKind;
35 : use crate::tenant::Timeline;
36 : use crate::tenant::layer_map::LayerMap;
37 : use crate::tenant::mgr::TenantSlot;
38 : use crate::tenant::storage_layer::{InMemoryLayer, PersistentLayerDesc};
39 : use crate::tenant::tasks::BackgroundLoopKind;
40 : use crate::tenant::throttle::ThrottleResult;
41 :
42 : /// Prometheus histogram buckets (in seconds) for operations in the critical
43 : /// path. In other words, operations that directly affect the latency of user
44 : /// queries.
45 : ///
46 : /// The buckets capture the majority of latencies in the microsecond and
47 : /// millisecond range but also extend far enough up to distinguish "bad" from
48 : /// "really bad".
49 : const CRITICAL_OP_BUCKETS: &[f64] = &[
50 : 0.000_001, 0.000_010, 0.000_100, // 1 us, 10 us, 100 us
51 : 0.001_000, 0.010_000, 0.100_000, // 1 ms, 10 ms, 100 ms
52 : 1.0, 10.0, 100.0, // 1 s, 10 s, 100 s
53 : ];
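
// Editorial worked example (not part of the original source), assuming the standard
// cumulative-bucket semantics of the prometheus `Histogram` type used throughout this
// file: an observed latency of 2.5 ms increments the buckets `le=0.01` (10 ms) and
// larger, but not `le=0.001` (1 ms), e.g.:
//
//     let histo = register_histogram!(
//         "example_critical_op_seconds", // hypothetical metric name
//         "Example histogram using CRITICAL_OP_BUCKETS",
//         CRITICAL_OP_BUCKETS.into()
//     )
//     .expect("failed to define a metric");
//     histo.observe(0.0025); // 2.5 ms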
54 :
55 : // Metrics collected on operations on the storage repository.
56 : #[derive(Debug, VariantNames, IntoStaticStr)]
57 : #[strum(serialize_all = "kebab_case")]
58 : pub(crate) enum StorageTimeOperation {
59 : #[strum(serialize = "layer flush")]
60 : LayerFlush,
61 :
62 : #[strum(serialize = "layer flush delay")]
63 : LayerFlushDelay,
64 :
65 : #[strum(serialize = "compact")]
66 : Compact,
67 :
68 : #[strum(serialize = "create images")]
69 : CreateImages,
70 :
71 : #[strum(serialize = "logical size")]
72 : LogicalSize,
73 :
74 : #[strum(serialize = "imitate logical size")]
75 : ImitateLogicalSize,
76 :
77 : #[strum(serialize = "load layer map")]
78 : LoadLayerMap,
79 :
80 : #[strum(serialize = "gc")]
81 : Gc,
82 :
83 : #[strum(serialize = "find gc cutoffs")]
84 : FindGcCutoffs,
85 : }
86 :
87 424 : pub(crate) static STORAGE_TIME_SUM_PER_TIMELINE: Lazy<CounterVec> = Lazy::new(|| {
88 424 : register_counter_vec!(
89 424 : "pageserver_storage_operations_seconds_sum",
90 424 : "Total time spent on storage operations with operation, tenant and timeline dimensions",
91 424 : &["operation", "tenant_id", "shard_id", "timeline_id"],
92 424 : )
93 424 : .expect("failed to define a metric")
94 424 : });
95 :
96 424 : pub(crate) static STORAGE_TIME_COUNT_PER_TIMELINE: Lazy<IntCounterVec> = Lazy::new(|| {
97 424 : register_int_counter_vec!(
98 424 : "pageserver_storage_operations_seconds_count",
99 424 : "Count of storage operations with operation, tenant and timeline dimensions",
100 424 : &["operation", "tenant_id", "shard_id", "timeline_id"],
101 424 : )
102 424 : .expect("failed to define a metric")
103 424 : });
104 :
105 : // Buckets for background operation duration in seconds, like compaction, GC, size calculation.
106 : const STORAGE_OP_BUCKETS: &[f64] = &[0.010, 0.100, 1.0, 10.0, 100.0, 1000.0];
107 :
108 424 : pub(crate) static STORAGE_TIME_GLOBAL: Lazy<HistogramVec> = Lazy::new(|| {
109 424 : register_histogram_vec!(
110 424 : "pageserver_storage_operations_seconds_global",
111 424 : "Time spent on storage operations",
112 424 : &["operation"],
113 424 : STORAGE_OP_BUCKETS.into(),
114 424 : )
115 424 : .expect("failed to define a metric")
116 424 : });
117 :
118 : /// Measures layers visited per read (i.e. read amplification).
119 : ///
120 : /// NB: for a batch, we count all visited layers towards each read. While the cost of layer visits
121 : /// are amortized across the batch, and some layers may not intersect with a given key, each visited
122 : /// layer contributes directly to the observed latency for every read in the batch, which is what we
123 : /// care about.
124 424 : pub(crate) static LAYERS_PER_READ: Lazy<HistogramVec> = Lazy::new(|| {
125 424 : register_histogram_vec!(
126 424 : "pageserver_layers_per_read",
127 424 : "Layers visited to serve a single read (read amplification). In a batch, all visited layers count towards every read.",
128 424 : &["tenant_id", "shard_id", "timeline_id"],
129 424 : // Low resolution to reduce cardinality.
130 424 : vec![4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0],
131 424 : )
132 424 : .expect("failed to define a metric")
133 424 : });
134 :
135 416 : pub(crate) static LAYERS_PER_READ_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
136 416 : register_histogram!(
137 416 : "pageserver_layers_per_read_global",
138 416 : "Layers visited to serve a single read (read amplification). In a batch, all visited layers count towards every read.",
139 416 : vec![1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0],
140 416 : )
141 416 : .expect("failed to define a metric")
142 416 : });
143 :
144 416 : pub(crate) static LAYERS_PER_READ_BATCH_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
145 416 : register_histogram!(
146 416 : "pageserver_layers_per_read_batch_global",
147 416 : "Layers visited to serve a single read batch (read amplification), regardless of number of reads.",
148 416 : vec![
149 416 : 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0
150 416 : ],
151 416 : )
152 416 : .expect("failed to define a metric")
153 416 : });
154 :
155 416 : pub(crate) static LAYERS_PER_READ_AMORTIZED_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
156 416 : register_histogram!(
157 416 : "pageserver_layers_per_read_amortized_global",
158 416 : "Layers visited to serve a single read (read amplification). Amortized across a batch: \
159 416 : all visited layers are divided by the number of reads.",
160 416 : vec![
161 416 : 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0
162 416 : ],
163 416 : )
164 416 : .expect("failed to define a metric")
165 416 : });
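
// Editorial worked example (not part of the original source): if a batch of 4 reads
// visits 12 layers in total, then per the help strings above, each read observes 12 in
// `pageserver_layers_per_read` and `..._global`, the batch observes 12 once in
// `..._batch_global`, and the amortized variant records 12 / 4 = 3 per read. The exact
// recording sites live outside this file.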
166 :
167 416 : pub(crate) static DELTAS_PER_READ_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
168 416 : // We expect this to be low because of Postgres checkpoints. Let's see if that holds.
169 416 : register_histogram!(
170 416 : "pageserver_deltas_per_read_global",
171 416 : "Number of delta pages applied to image page per read",
172 416 : vec![0.0, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0],
173 416 : )
174 416 : .expect("failed to define a metric")
175 416 : });
176 :
177 0 : pub(crate) static CONCURRENT_INITDBS: Lazy<UIntGauge> = Lazy::new(|| {
178 0 : register_uint_gauge!(
179 0 : "pageserver_concurrent_initdb",
180 0 : "Number of initdb processes running"
181 0 : )
182 0 : .expect("failed to define a metric")
183 0 : });
184 :
185 0 : pub(crate) static INITDB_SEMAPHORE_ACQUISITION_TIME: Lazy<Histogram> = Lazy::new(|| {
186 0 : register_histogram!(
187 0 : "pageserver_initdb_semaphore_seconds_global",
188 0 : "Time spent getting a permit from the global initdb semaphore",
189 0 : STORAGE_OP_BUCKETS.into()
190 0 : )
191 0 : .expect("failed to define metric")
192 0 : });
193 :
194 0 : pub(crate) static INITDB_RUN_TIME: Lazy<Histogram> = Lazy::new(|| {
195 0 : register_histogram!(
196 0 : "pageserver_initdb_seconds_global",
197 0 : "Time spent performing initdb",
198 0 : STORAGE_OP_BUCKETS.into()
199 0 : )
200 0 : .expect("failed to define metric")
201 0 : });
202 :
203 : pub(crate) struct GetVectoredLatency {
204 : map: EnumMap<TaskKind, Option<Histogram>>,
205 : }
206 :
207 : #[allow(dead_code)]
208 : pub(crate) struct ScanLatency {
209 : map: EnumMap<TaskKind, Option<Histogram>>,
210 : }
211 :
212 : impl GetVectoredLatency {
213 : // Only these task types perform vectored gets. Filter all other tasks out to reduce total
214 : // cardinality of the metric.
215 : const TRACKED_TASK_KINDS: [TaskKind; 2] = [TaskKind::Compaction, TaskKind::PageRequestHandler];
216 :
217 43504 : pub(crate) fn for_task_kind(&self, task_kind: TaskKind) -> Option<&Histogram> {
218 43504 : self.map[task_kind].as_ref()
219 43504 : }
220 : }
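
// Editorial usage sketch (not part of the original source): callers look up the
// histogram for their task kind and skip recording for untracked kinds, e.g.
// (`elapsed` is a hypothetical `Duration`):
//
//     if let Some(histo) = GET_VECTORED_LATENCY.for_task_kind(TaskKind::Compaction) {
//         histo.observe(elapsed.as_secs_f64());
//     }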
221 :
222 : impl ScanLatency {
223 : // Only these task types perform scans. Filter all other tasks out to reduce total
224 : // cardinality of the metric.
225 : const TRACKED_TASK_KINDS: [TaskKind; 1] = [TaskKind::PageRequestHandler];
226 :
227 24 : pub(crate) fn for_task_kind(&self, task_kind: TaskKind) -> Option<&Histogram> {
228 24 : self.map[task_kind].as_ref()
229 24 : }
230 : }
231 :
232 : pub(crate) struct ScanLatencyOngoingRecording<'a> {
233 : parent: &'a Histogram,
234 : start: std::time::Instant,
235 : }
236 :
237 : impl<'a> ScanLatencyOngoingRecording<'a> {
238 0 : pub(crate) fn start_recording(parent: &'a Histogram) -> ScanLatencyOngoingRecording<'a> {
239 0 : let start = Instant::now();
240 0 : ScanLatencyOngoingRecording { parent, start }
241 0 : }
242 :
243 0 : pub(crate) fn observe(self) {
244 0 : let elapsed = self.start.elapsed();
245 0 : self.parent.observe(elapsed.as_secs_f64());
246 0 : }
247 : }
248 :
249 408 : pub(crate) static GET_VECTORED_LATENCY: Lazy<GetVectoredLatency> = Lazy::new(|| {
250 408 : let inner = register_histogram_vec!(
251 408 : "pageserver_get_vectored_seconds",
252 408 : "Time spent in get_vectored.",
253 408 : &["task_kind"],
254 408 : CRITICAL_OP_BUCKETS.into(),
255 408 : )
256 408 : .expect("failed to define a metric");
257 408 :
258 408 : GetVectoredLatency {
259 12648 : map: EnumMap::from_array(std::array::from_fn(|task_kind_idx| {
260 12648 : let task_kind = TaskKind::from_usize(task_kind_idx);
261 12648 :
262 12648 : if GetVectoredLatency::TRACKED_TASK_KINDS.contains(&task_kind) {
263 816 : let task_kind = task_kind.into();
264 816 : Some(inner.with_label_values(&[task_kind]))
265 : } else {
266 11832 : None
267 : }
268 12648 : })),
269 408 : }
270 408 : });
271 :
272 8 : pub(crate) static SCAN_LATENCY: Lazy<ScanLatency> = Lazy::new(|| {
273 8 : let inner = register_histogram_vec!(
274 8 : "pageserver_scan_seconds",
275 8 : "Time spent in scan.",
276 8 : &["task_kind"],
277 8 : CRITICAL_OP_BUCKETS.into(),
278 8 : )
279 8 : .expect("failed to define a metric");
280 8 :
281 8 : ScanLatency {
282 248 : map: EnumMap::from_array(std::array::from_fn(|task_kind_idx| {
283 248 : let task_kind = TaskKind::from_usize(task_kind_idx);
284 248 :
285 248 : if ScanLatency::TRACKED_TASK_KINDS.contains(&task_kind) {
286 8 : let task_kind = task_kind.into();
287 8 : Some(inner.with_label_values(&[task_kind]))
288 : } else {
289 240 : None
290 : }
291 248 : })),
292 8 : }
293 8 : });
294 :
295 : pub(crate) struct PageCacheMetricsForTaskKind {
296 : pub read_accesses_immutable: IntCounter,
297 : pub read_hits_immutable: IntCounter,
298 : }
299 :
300 : pub(crate) struct PageCacheMetrics {
301 : map: EnumMap<TaskKind, EnumMap<PageContentKind, PageCacheMetricsForTaskKind>>,
302 : }
303 :
304 200 : static PAGE_CACHE_READ_HITS: Lazy<IntCounterVec> = Lazy::new(|| {
305 200 : register_int_counter_vec!(
306 200 : "pageserver_page_cache_read_hits_total",
307 200 : "Number of read accesses to the page cache that hit",
308 200 : &["task_kind", "key_kind", "content_kind", "hit_kind"]
309 200 : )
310 200 : .expect("failed to define a metric")
311 200 : });
312 :
313 200 : static PAGE_CACHE_READ_ACCESSES: Lazy<IntCounterVec> = Lazy::new(|| {
314 200 : register_int_counter_vec!(
315 200 : "pageserver_page_cache_read_accesses_total",
316 200 : "Number of read accesses to the page cache",
317 200 : &["task_kind", "key_kind", "content_kind"]
318 200 : )
319 200 : .expect("failed to define a metric")
320 200 : });
321 :
322 200 : pub(crate) static PAGE_CACHE: Lazy<PageCacheMetrics> = Lazy::new(|| PageCacheMetrics {
323 6200 : map: EnumMap::from_array(std::array::from_fn(|task_kind| {
324 6200 : let task_kind = TaskKind::from_usize(task_kind);
325 6200 : let task_kind: &'static str = task_kind.into();
326 49600 : EnumMap::from_array(std::array::from_fn(|content_kind| {
327 49600 : let content_kind = PageContentKind::from_usize(content_kind);
328 49600 : let content_kind: &'static str = content_kind.into();
329 49600 : PageCacheMetricsForTaskKind {
330 49600 : read_accesses_immutable: {
331 49600 : PAGE_CACHE_READ_ACCESSES
332 49600 : .get_metric_with_label_values(&[task_kind, "immutable", content_kind])
333 49600 : .unwrap()
334 49600 : },
335 49600 :
336 49600 : read_hits_immutable: {
337 49600 : PAGE_CACHE_READ_HITS
338 49600 : .get_metric_with_label_values(&[task_kind, "immutable", content_kind, "-"])
339 49600 : .unwrap()
340 49600 : },
341 49600 : }
342 49600 : }))
343 6200 : })),
344 200 : });
345 :
346 : impl PageCacheMetrics {
347 2249546 : pub(crate) fn for_ctx(&self, ctx: &RequestContext) -> &PageCacheMetricsForTaskKind {
348 2249546 : &self.map[ctx.task_kind()][ctx.page_content_kind()]
349 2249546 : }
350 : }
351 :
352 : pub(crate) struct PageCacheSizeMetrics {
353 : pub max_bytes: UIntGauge,
354 :
355 : pub current_bytes_immutable: UIntGauge,
356 : }
357 :
358 200 : static PAGE_CACHE_SIZE_CURRENT_BYTES: Lazy<UIntGaugeVec> = Lazy::new(|| {
359 200 : register_uint_gauge_vec!(
360 200 : "pageserver_page_cache_size_current_bytes",
361 200 : "Current size of the page cache in bytes, by key kind",
362 200 : &["key_kind"]
363 200 : )
364 200 : .expect("failed to define a metric")
365 200 : });
366 :
367 : pub(crate) static PAGE_CACHE_SIZE: Lazy<PageCacheSizeMetrics> =
368 200 : Lazy::new(|| PageCacheSizeMetrics {
369 200 : max_bytes: {
370 200 : register_uint_gauge!(
371 200 : "pageserver_page_cache_size_max_bytes",
372 200 : "Maximum size of the page cache in bytes"
373 200 : )
374 200 : .expect("failed to define a metric")
375 200 : },
376 200 : current_bytes_immutable: {
377 200 : PAGE_CACHE_SIZE_CURRENT_BYTES
378 200 : .get_metric_with_label_values(&["immutable"])
379 200 : .unwrap()
380 200 : },
381 200 : });
382 :
383 : pub(crate) mod page_cache_eviction_metrics {
384 : use std::num::NonZeroUsize;
385 :
386 : use metrics::{IntCounter, IntCounterVec, register_int_counter_vec};
387 : use once_cell::sync::Lazy;
388 :
389 : #[derive(Clone, Copy)]
390 : pub(crate) enum Outcome {
391 : FoundSlotUnused { iters: NonZeroUsize },
392 : FoundSlotEvicted { iters: NonZeroUsize },
393 : ItersExceeded { iters: NonZeroUsize },
394 : }
395 :
396 200 : static ITERS_TOTAL_VEC: Lazy<IntCounterVec> = Lazy::new(|| {
397 200 : register_int_counter_vec!(
398 200 : "pageserver_page_cache_find_victim_iters_total",
399 200 : "Counter for the number of iterations in the find_victim loop",
400 200 : &["outcome"],
401 200 : )
402 200 : .expect("failed to define a metric")
403 200 : });
404 :
405 200 : static CALLS_VEC: Lazy<IntCounterVec> = Lazy::new(|| {
406 200 : register_int_counter_vec!(
407 200 : "pageserver_page_cache_find_victim_calls",
408 200 : "Incremented at the end of each find_victim() call.\
409 200 : Filter by outcome to get e.g., eviction rate.",
410 200 : &["outcome"]
411 200 : )
412 200 : .unwrap()
413 200 : });
414 :
415 61708 : pub(crate) fn observe(outcome: Outcome) {
416 : macro_rules! dry {
417 : ($label:literal, $iters:expr) => {{
418 : static LABEL: &'static str = $label;
419 : static ITERS_TOTAL: Lazy<IntCounter> =
420 236 : Lazy::new(|| ITERS_TOTAL_VEC.with_label_values(&[LABEL]));
421 : static CALLS: Lazy<IntCounter> =
422 236 : Lazy::new(|| CALLS_VEC.with_label_values(&[LABEL]));
423 : ITERS_TOTAL.inc_by(($iters.get()) as u64);
424 : CALLS.inc();
425 : }};
426 : }
427 61708 : match outcome {
428 3280 : Outcome::FoundSlotUnused { iters } => dry!("found_empty", iters),
429 58428 : Outcome::FoundSlotEvicted { iters } => {
430 58428 : dry!("found_evicted", iters)
431 : }
432 0 : Outcome::ItersExceeded { iters } => {
433 0 : dry!("err_iters_exceeded", iters);
434 0 : super::page_cache_errors_inc(super::PageCacheErrorKind::EvictIterLimit);
435 0 : }
436 : }
437 61708 : }
438 : }
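
// Editorial usage sketch (not part of the original source): the page cache's
// find_victim loop is expected to report its outcome and iteration count, e.g.:
//
//     use std::num::NonZeroUsize;
//     page_cache_eviction_metrics::observe(page_cache_eviction_metrics::Outcome::FoundSlotEvicted {
//         iters: NonZeroUsize::new(3).unwrap(),
//     });
//
// The `dry!` macro memoizes the per-label counters in `static`s, so the hot path avoids
// a label lookup on every call.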
439 :
440 0 : static PAGE_CACHE_ERRORS: Lazy<IntCounterVec> = Lazy::new(|| {
441 0 : register_int_counter_vec!(
442 0 : "page_cache_errors_total",
443 0 : "Number of timeouts while acquiring a pinned slot in the page cache",
444 0 : &["error_kind"]
445 0 : )
446 0 : .expect("failed to define a metric")
447 0 : });
448 :
449 : #[derive(IntoStaticStr)]
450 : #[strum(serialize_all = "kebab_case")]
451 : pub(crate) enum PageCacheErrorKind {
452 : AcquirePinnedSlotTimeout,
453 : EvictIterLimit,
454 : }
455 :
456 0 : pub(crate) fn page_cache_errors_inc(error_kind: PageCacheErrorKind) {
457 0 : PAGE_CACHE_ERRORS
458 0 : .get_metric_with_label_values(&[error_kind.into()])
459 0 : .unwrap()
460 0 : .inc();
461 0 : }
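
// Editorial usage sketch (not part of the original source): e.g. a timeout while
// acquiring a pinned slot would be recorded as
//
//     page_cache_errors_inc(PageCacheErrorKind::AcquirePinnedSlotTimeout);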
462 :
463 44 : pub(crate) static WAIT_LSN_TIME: Lazy<Histogram> = Lazy::new(|| {
464 44 : register_histogram!(
465 44 : "pageserver_wait_lsn_seconds",
466 44 : "Time spent waiting for WAL to arrive. Updated on completion of the wait_lsn operation.",
467 44 : CRITICAL_OP_BUCKETS.into(),
468 44 : )
469 44 : .expect("failed to define a metric")
470 44 : });
471 :
472 424 : pub(crate) static WAIT_LSN_START_FINISH_COUNTERPAIR: Lazy<IntCounterPairVec> = Lazy::new(|| {
473 424 : register_int_counter_pair_vec!(
474 424 : "pageserver_wait_lsn_started_count",
475 424 : "Number of wait_lsn operations started.",
476 424 : "pageserver_wait_lsn_finished_count",
477 424 : "Number of wait_lsn operations finished.",
478 424 : &["tenant_id", "shard_id", "timeline_id"],
479 424 : )
480 424 : .expect("failed to define a metric")
481 424 : });
482 :
483 424 : pub(crate) static WAIT_LSN_IN_PROGRESS_MICROS: Lazy<IntCounterVec> = Lazy::new(|| {
484 424 : register_int_counter_vec!(
485 424 : "pageserver_wait_lsn_in_progress_micros",
486 424 : "Time spent waiting for WAL to arrive, by timeline_id. Updated periodically while waiting.",
487 424 : &["tenant_id", "shard_id", "timeline_id"],
488 424 : )
489 424 : .expect("failed to define a metric")
490 424 : });
491 :
492 424 : pub(crate) static WAIT_LSN_IN_PROGRESS_GLOBAL_MICROS: Lazy<IntCounter> = Lazy::new(|| {
493 424 : register_int_counter!(
494 424 : "pageserver_wait_lsn_in_progress_micros_global",
495 424 : "Time spent waiting for WAL to arrive, globally. Updated periodically while waiting."
496 424 : )
497 424 : .expect("failed to define a metric")
498 424 : });
499 :
500 : pub(crate) mod wait_ondemand_download_time {
501 : use super::*;
502 : const WAIT_ONDEMAND_DOWNLOAD_TIME_BUCKETS: &[f64] = &[
503 : 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, // 10 ms - 100ms
504 : 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, // 100ms to 1s
505 : 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, // 1s to 10s
506 : 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, // 10s to 1m
507 : ];
508 :
509 : /// The task kinds for which we want to track wait times for on-demand downloads.
510 : /// Other task kinds' wait times are accumulated in label value `unknown`.
511 : pub(crate) const WAIT_ONDEMAND_DOWNLOAD_METRIC_TASK_KINDS: [TaskKind; 2] = [
512 : TaskKind::PageRequestHandler,
513 : TaskKind::WalReceiverConnectionHandler,
514 : ];
515 :
516 0 : pub(crate) static WAIT_ONDEMAND_DOWNLOAD_TIME_GLOBAL: Lazy<Vec<Histogram>> = Lazy::new(|| {
517 0 : let histo = register_histogram_vec!(
518 0 : "pageserver_wait_ondemand_download_seconds_global",
519 0 : "Observations are individual tasks' wait times for on-demand downloads. \
520 0 : If N tasks coalesce on an on-demand download, and it takes 10s, then we observe N * 10s.",
521 0 : &["task_kind"],
522 0 : WAIT_ONDEMAND_DOWNLOAD_TIME_BUCKETS.into(),
523 0 : )
524 0 : .expect("failed to define a metric");
525 0 : WAIT_ONDEMAND_DOWNLOAD_METRIC_TASK_KINDS
526 0 : .iter()
527 0 : .map(|task_kind| histo.with_label_values(&[task_kind.into()]))
528 0 : .collect::<Vec<_>>()
529 0 : });
530 :
531 424 : pub(crate) static WAIT_ONDEMAND_DOWNLOAD_TIME_SUM: Lazy<CounterVec> = Lazy::new(|| {
532 424 : register_counter_vec!(
533 424 : // use a name that _could_ be evolved into a per-timeline histogram later
534 424 : "pageserver_wait_ondemand_download_seconds_sum",
535 424 : "Like `pageserver_wait_ondemand_download_seconds_global` but per timeline",
536 424 : &["tenant_id", "shard_id", "timeline_id", "task_kind"],
537 424 : )
538 424 : .unwrap()
539 424 : });
540 :
541 : pub struct WaitOndemandDownloadTimeSum {
542 : counters: [Counter; WAIT_ONDEMAND_DOWNLOAD_METRIC_TASK_KINDS.len()],
543 : }
544 :
545 : impl WaitOndemandDownloadTimeSum {
546 928 : pub(crate) fn new(tenant_id: &str, shard_id: &str, timeline_id: &str) -> Self {
547 928 : let counters = WAIT_ONDEMAND_DOWNLOAD_METRIC_TASK_KINDS
548 928 : .iter()
549 1856 : .map(|task_kind| {
550 1856 : WAIT_ONDEMAND_DOWNLOAD_TIME_SUM
551 1856 : .get_metric_with_label_values(&[
552 1856 : tenant_id,
553 1856 : shard_id,
554 1856 : timeline_id,
555 1856 : task_kind.into(),
556 1856 : ])
557 1856 : .unwrap()
558 1856 : })
559 928 : .collect::<Vec<_>>();
560 928 : Self {
561 928 : counters: counters.try_into().unwrap(),
562 928 : }
563 928 : }
564 48 : pub(crate) fn observe(&self, task_kind: TaskKind, duration: Duration) {
565 48 : let maybe = WAIT_ONDEMAND_DOWNLOAD_METRIC_TASK_KINDS
566 48 : .iter()
567 48 : .enumerate()
568 96 : .find(|(_, kind)| **kind == task_kind);
569 48 : let Some((idx, _)) = maybe else {
570 48 : return;
571 : };
572 0 : WAIT_ONDEMAND_DOWNLOAD_TIME_GLOBAL[idx].observe(duration.as_secs_f64());
573 0 : let counter = &self.counters[idx];
574 0 : counter.inc_by(duration.as_secs_f64());
575 48 : }
576 : }
577 :
578 20 : pub(crate) fn shutdown_timeline(tenant_id: &str, shard_id: &str, timeline_id: &str) {
579 60 : for task_kind in WAIT_ONDEMAND_DOWNLOAD_METRIC_TASK_KINDS {
580 40 : let _ = WAIT_ONDEMAND_DOWNLOAD_TIME_SUM.remove_label_values(&[
581 40 : tenant_id,
582 40 : shard_id,
583 40 : timeline_id,
584 40 : task_kind.into(),
585 40 : ]);
586 40 : }
587 20 : }
588 :
589 0 : pub(crate) fn preinitialize_global_metrics() {
590 0 : Lazy::force(&WAIT_ONDEMAND_DOWNLOAD_TIME_GLOBAL);
591 0 : }
592 : }
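
// Editorial usage sketch (not part of the original source): a timeline owns one
// `WaitOndemandDownloadTimeSum` and reports each task's wait, e.g. (IDs are placeholders):
//
//     let wait_metrics = wait_ondemand_download_time::WaitOndemandDownloadTimeSum::new(
//         "<tenant_id>", "<shard_id>", "<timeline_id>",
//     );
//     wait_metrics.observe(TaskKind::PageRequestHandler, Duration::from_millis(250));
//
// `observe` silently ignores task kinds outside WAIT_ONDEMAND_DOWNLOAD_METRIC_TASK_KINDS.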
593 :
594 424 : static LAST_RECORD_LSN: Lazy<IntGaugeVec> = Lazy::new(|| {
595 424 : register_int_gauge_vec!(
596 424 : "pageserver_last_record_lsn",
597 424 : "Last record LSN grouped by timeline",
598 424 : &["tenant_id", "shard_id", "timeline_id"]
599 424 : )
600 424 : .expect("failed to define a metric")
601 424 : });
602 :
603 424 : static DISK_CONSISTENT_LSN: Lazy<IntGaugeVec> = Lazy::new(|| {
604 424 : register_int_gauge_vec!(
605 424 : "pageserver_disk_consistent_lsn",
606 424 : "Disk consistent LSN grouped by timeline",
607 424 : &["tenant_id", "shard_id", "timeline_id"]
608 424 : )
609 424 : .expect("failed to define a metric")
610 424 : });
611 :
612 424 : pub(crate) static PROJECTED_REMOTE_CONSISTENT_LSN: Lazy<UIntGaugeVec> = Lazy::new(|| {
613 424 : register_uint_gauge_vec!(
614 424 : "pageserver_projected_remote_consistent_lsn",
615 424 : "Projected remote consistent LSN grouped by timeline",
616 424 : &["tenant_id", "shard_id", "timeline_id"]
617 424 : )
618 424 : .expect("failed to define a metric")
619 424 : });
620 :
621 424 : static PITR_HISTORY_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
622 424 : register_uint_gauge_vec!(
623 424 : "pageserver_pitr_history_size",
624 424 : "Data written since PITR cutoff on this timeline",
625 424 : &["tenant_id", "shard_id", "timeline_id"]
626 424 : )
627 424 : .expect("failed to define a metric")
628 424 : });
629 :
630 : #[derive(
631 240 : strum_macros::EnumIter,
632 0 : strum_macros::EnumString,
633 : strum_macros::Display,
634 : strum_macros::IntoStaticStr,
635 : )]
636 : #[strum(serialize_all = "kebab_case")]
637 : pub(crate) enum LayerKind {
638 : Delta,
639 : Image,
640 : }
641 :
642 : #[derive(
643 100 : strum_macros::EnumIter,
644 0 : strum_macros::EnumString,
645 : strum_macros::Display,
646 : strum_macros::IntoStaticStr,
647 : )]
648 : #[strum(serialize_all = "kebab_case")]
649 : pub(crate) enum LayerLevel {
650 : // We don't track the currently open ephemeral layer, since there's always exactly 1 and its
651 : // size changes. See `TIMELINE_EPHEMERAL_BYTES`.
652 : Frozen,
653 : L0,
654 : L1,
655 : }
656 :
657 416 : static TIMELINE_LAYER_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
658 416 : register_uint_gauge_vec!(
659 416 : "pageserver_layer_bytes",
660 416 : "Sum of frozen, L0, and L1 layer physical sizes in bytes (excluding the open ephemeral layer)",
661 416 : &["tenant_id", "shard_id", "timeline_id", "level", "kind"]
662 416 : )
663 416 : .expect("failed to define a metric")
664 416 : });
665 :
666 416 : static TIMELINE_LAYER_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
667 416 : register_uint_gauge_vec!(
668 416 : "pageserver_layer_count",
669 416 : "Number of frozen, L0, and L1 layers (excluding the open ephemeral layer)",
670 416 : &["tenant_id", "shard_id", "timeline_id", "level", "kind"]
671 416 : )
672 416 : .expect("failed to define a metric")
673 416 : });
674 :
675 424 : static TIMELINE_ARCHIVE_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
676 424 : register_uint_gauge_vec!(
677 424 : "pageserver_archive_size",
678 424 : "Timeline's logical size if it is considered eligible for archival (outside PITR window), else zero",
679 424 : &["tenant_id", "shard_id", "timeline_id"]
680 424 : )
681 424 : .expect("failed to define a metric")
682 424 : });
683 :
684 424 : static STANDBY_HORIZON: Lazy<IntGaugeVec> = Lazy::new(|| {
685 424 : register_int_gauge_vec!(
686 424 : "pageserver_standby_horizon",
687 424 : "Standby apply LSN for which GC is hold off, by timeline.",
688 424 : &["tenant_id", "shard_id", "timeline_id"]
689 424 : )
690 424 : .expect("failed to define a metric")
691 424 : });
692 :
693 424 : static RESIDENT_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
694 424 : register_uint_gauge_vec!(
695 424 : "pageserver_resident_physical_size",
696 424 : "The size of the layer files present in the pageserver's filesystem, for attached locations.",
697 424 : &["tenant_id", "shard_id", "timeline_id"]
698 424 : )
699 424 : .expect("failed to define a metric")
700 424 : });
701 :
702 424 : static VISIBLE_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
703 424 : register_uint_gauge_vec!(
704 424 : "pageserver_visible_physical_size",
705 424 : "The size of the layer files present in the pageserver's filesystem.",
706 424 : &["tenant_id", "shard_id", "timeline_id"]
707 424 : )
708 424 : .expect("failed to define a metric")
709 424 : });
710 :
711 416 : pub(crate) static RESIDENT_PHYSICAL_SIZE_GLOBAL: Lazy<UIntGauge> = Lazy::new(|| {
712 416 : register_uint_gauge!(
713 416 : "pageserver_resident_physical_size_global",
714 416 : "Like `pageserver_resident_physical_size`, but without tenant/timeline dimensions."
715 416 : )
716 416 : .expect("failed to define a metric")
717 416 : });
718 :
719 424 : static REMOTE_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
720 424 : register_uint_gauge_vec!(
721 424 : "pageserver_remote_physical_size",
722 424 : "The size of the layer files present in the remote storage that are listed in the remote index_part.json.",
723 424 : // Corollary: If any files are missing from the index part, they won't be included here.
724 424 : &["tenant_id", "shard_id", "timeline_id"]
725 424 : )
726 424 : .expect("failed to define a metric")
727 424 : });
728 :
729 424 : static REMOTE_PHYSICAL_SIZE_GLOBAL: Lazy<UIntGauge> = Lazy::new(|| {
730 424 : register_uint_gauge!(
731 424 : "pageserver_remote_physical_size_global",
732 424 : "Like `pageserver_remote_physical_size`, but without tenant/timeline dimensions."
733 424 : )
734 424 : .expect("failed to define a metric")
735 424 : });
736 :
737 12 : pub(crate) static REMOTE_ONDEMAND_DOWNLOADED_LAYERS: Lazy<IntCounter> = Lazy::new(|| {
738 12 : register_int_counter!(
739 12 : "pageserver_remote_ondemand_downloaded_layers_total",
740 12 : "Total on-demand downloaded layers"
741 12 : )
742 12 : .unwrap()
743 12 : });
744 :
745 12 : pub(crate) static REMOTE_ONDEMAND_DOWNLOADED_BYTES: Lazy<IntCounter> = Lazy::new(|| {
746 12 : register_int_counter!(
747 12 : "pageserver_remote_ondemand_downloaded_bytes_total",
748 12 : "Total bytes of layers on-demand downloaded",
749 12 : )
750 12 : .unwrap()
751 12 : });
752 :
753 424 : static CURRENT_LOGICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
754 424 : register_uint_gauge_vec!(
755 424 : "pageserver_current_logical_size",
756 424 : "Current logical size grouped by timeline",
757 424 : &["tenant_id", "shard_id", "timeline_id"]
758 424 : )
759 424 : .expect("failed to define current logical size metric")
760 424 : });
761 :
762 424 : static AUX_FILE_SIZE: Lazy<IntGaugeVec> = Lazy::new(|| {
763 424 : register_int_gauge_vec!(
764 424 : "pageserver_aux_file_estimated_size",
765 424 : "The size of all aux files for a timeline in aux file v2 store.",
766 424 : &["tenant_id", "shard_id", "timeline_id"]
767 424 : )
768 424 : .expect("failed to define a metric")
769 424 : });
770 :
771 424 : static VALID_LSN_LEASE_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
772 424 : register_uint_gauge_vec!(
773 424 : "pageserver_valid_lsn_lease_count",
774 424 : "The number of valid leases after refreshing gc info.",
775 424 : &["tenant_id", "shard_id", "timeline_id"],
776 424 : )
777 424 : .expect("failed to define a metric")
778 424 : });
779 :
780 0 : pub(crate) static CIRCUIT_BREAKERS_BROKEN: Lazy<IntCounter> = Lazy::new(|| {
781 0 : register_int_counter!(
782 0 : "pageserver_circuit_breaker_broken",
783 0 : "How many times a circuit breaker has broken"
784 0 : )
785 0 : .expect("failed to define a metric")
786 0 : });
787 :
788 0 : pub(crate) static CIRCUIT_BREAKERS_UNBROKEN: Lazy<IntCounter> = Lazy::new(|| {
789 0 : register_int_counter!(
790 0 : "pageserver_circuit_breaker_unbroken",
791 0 : "How many times a circuit breaker has been un-broken (recovered)"
792 0 : )
793 0 : .expect("failed to define a metric")
794 0 : });
795 :
796 408 : pub(crate) static COMPRESSION_IMAGE_INPUT_BYTES: Lazy<IntCounter> = Lazy::new(|| {
797 408 : register_int_counter!(
798 408 : "pageserver_compression_image_in_bytes_total",
799 408 : "Size of data written into image layers before compression"
800 408 : )
801 408 : .expect("failed to define a metric")
802 408 : });
803 :
804 408 : pub(crate) static COMPRESSION_IMAGE_INPUT_BYTES_CONSIDERED: Lazy<IntCounter> = Lazy::new(|| {
805 408 : register_int_counter!(
806 408 : "pageserver_compression_image_in_bytes_considered",
807 408 : "Size of potentially compressible data written into image layers before compression"
808 408 : )
809 408 : .expect("failed to define a metric")
810 408 : });
811 :
812 408 : pub(crate) static COMPRESSION_IMAGE_INPUT_BYTES_CHOSEN: Lazy<IntCounter> = Lazy::new(|| {
813 408 : register_int_counter!(
814 408 : "pageserver_compression_image_in_bytes_chosen",
815 408 : "Size of data whose compressed form was written into image layers"
816 408 : )
817 408 : .expect("failed to define a metric")
818 408 : });
819 :
820 408 : pub(crate) static COMPRESSION_IMAGE_OUTPUT_BYTES: Lazy<IntCounter> = Lazy::new(|| {
821 408 : register_int_counter!(
822 408 : "pageserver_compression_image_out_bytes_total",
823 408 : "Size of compressed image layer written"
824 408 : )
825 408 : .expect("failed to define a metric")
826 408 : });
827 :
828 20 : pub(crate) static RELSIZE_CACHE_ENTRIES: Lazy<UIntGauge> = Lazy::new(|| {
829 20 : register_uint_gauge!(
830 20 : "pageserver_relsize_cache_entries",
831 20 : "Number of entries in the relation size cache",
832 20 : )
833 20 : .expect("failed to define a metric")
834 20 : });
835 :
836 20 : pub(crate) static RELSIZE_CACHE_HITS: Lazy<IntCounter> = Lazy::new(|| {
837 20 : register_int_counter!("pageserver_relsize_cache_hits", "Relation size cache hits",)
838 20 : .expect("failed to define a metric")
839 20 : });
840 :
841 20 : pub(crate) static RELSIZE_CACHE_MISSES: Lazy<IntCounter> = Lazy::new(|| {
842 20 : register_int_counter!(
843 20 : "pageserver_relsize_cache_misses",
844 20 : "Relation size cache misses",
845 20 : )
846 20 : .expect("failed to define a metric")
847 20 : });
848 :
849 8 : pub(crate) static RELSIZE_CACHE_MISSES_OLD: Lazy<IntCounter> = Lazy::new(|| {
850 8 : register_int_counter!(
851 8 : "pageserver_relsize_cache_misses_old",
852 8 : "Relation size cache misses where the lookup LSN is older than the last relation update"
853 8 : )
854 8 : .expect("failed to define a metric")
855 8 : });
856 :
857 : pub(crate) mod initial_logical_size {
858 : use metrics::{IntCounter, IntCounterVec, register_int_counter, register_int_counter_vec};
859 : use once_cell::sync::Lazy;
860 :
861 : pub(crate) struct StartCalculation(IntCounterVec);
862 424 : pub(crate) static START_CALCULATION: Lazy<StartCalculation> = Lazy::new(|| {
863 424 : StartCalculation(
864 424 : register_int_counter_vec!(
865 424 : "pageserver_initial_logical_size_start_calculation",
866 424 : "Incremented each time we start an initial logical size calculation attempt. \
867 424 : The `circumstances` label provides some additional details.",
868 424 : &["attempt", "circumstances"]
869 424 : )
870 424 : .unwrap(),
871 424 : )
872 424 : });
873 :
874 : struct DropCalculation {
875 : first: IntCounter,
876 : retry: IntCounter,
877 : }
878 :
879 424 : static DROP_CALCULATION: Lazy<DropCalculation> = Lazy::new(|| {
880 424 : let vec = register_int_counter_vec!(
881 424 : "pageserver_initial_logical_size_drop_calculation",
882 424 : "Incremented each time we abort a started size calculation attmpt.",
883 424 : &["attempt"]
884 424 : )
885 424 : .unwrap();
886 424 : DropCalculation {
887 424 : first: vec.with_label_values(&["first"]),
888 424 : retry: vec.with_label_values(&["retry"]),
889 424 : }
890 424 : });
891 :
892 : pub(crate) struct Calculated {
893 : pub(crate) births: IntCounter,
894 : pub(crate) deaths: IntCounter,
895 : }
896 :
897 424 : pub(crate) static CALCULATED: Lazy<Calculated> = Lazy::new(|| Calculated {
898 424 : births: register_int_counter!(
899 424 : "pageserver_initial_logical_size_finish_calculation",
900 424 : "Incremented every time we finish calculation of initial logical size.\
901 424 : If everything is working well, this should happen at most once per Timeline object."
902 424 : )
903 424 : .unwrap(),
904 424 : deaths: register_int_counter!(
905 424 : "pageserver_initial_logical_size_drop_finished_calculation",
906 424 : "Incremented when we drop a finished initial logical size calculation result.\
907 424 : Mainly useful to turn pageserver_initial_logical_size_finish_calculation into a gauge."
908 424 : )
909 424 : .unwrap(),
910 424 : });
911 :
912 : pub(crate) struct OngoingCalculationGuard {
913 : inc_drop_calculation: Option<IntCounter>,
914 : }
915 :
916 : #[derive(strum_macros::IntoStaticStr)]
917 : pub(crate) enum StartCircumstances {
918 : EmptyInitial,
919 : SkippedConcurrencyLimiter,
920 : AfterBackgroundTasksRateLimit,
921 : }
922 :
923 : impl StartCalculation {
924 448 : pub(crate) fn first(&self, circumstances: StartCircumstances) -> OngoingCalculationGuard {
925 448 : let circumstances_label: &'static str = circumstances.into();
926 448 : self.0
927 448 : .with_label_values(&["first", circumstances_label])
928 448 : .inc();
929 448 : OngoingCalculationGuard {
930 448 : inc_drop_calculation: Some(DROP_CALCULATION.first.clone()),
931 448 : }
932 448 : }
933 0 : pub(crate) fn retry(&self, circumstances: StartCircumstances) -> OngoingCalculationGuard {
934 0 : let circumstances_label: &'static str = circumstances.into();
935 0 : self.0
936 0 : .with_label_values(&["retry", circumstances_label])
937 0 : .inc();
938 0 : OngoingCalculationGuard {
939 0 : inc_drop_calculation: Some(DROP_CALCULATION.retry.clone()),
940 0 : }
941 0 : }
942 : }
943 :
944 : impl Drop for OngoingCalculationGuard {
945 448 : fn drop(&mut self) {
946 448 : if let Some(counter) = self.inc_drop_calculation.take() {
947 0 : counter.inc();
948 448 : }
949 448 : }
950 : }
951 :
952 : impl OngoingCalculationGuard {
953 448 : pub(crate) fn calculation_result_saved(mut self) -> FinishedCalculationGuard {
954 448 : drop(self.inc_drop_calculation.take());
955 448 : CALCULATED.births.inc();
956 448 : FinishedCalculationGuard {
957 448 : inc_on_drop: CALCULATED.deaths.clone(),
958 448 : }
959 448 : }
960 : }
961 :
962 : pub(crate) struct FinishedCalculationGuard {
963 : inc_on_drop: IntCounter,
964 : }
965 :
966 : impl Drop for FinishedCalculationGuard {
967 12 : fn drop(&mut self) {
968 12 : self.inc_on_drop.inc();
969 12 : }
970 : }
971 :
972 : // context: https://github.com/neondatabase/neon/issues/5963
973 : pub(crate) static TIMELINES_WHERE_WALRECEIVER_GOT_APPROXIMATE_SIZE: Lazy<IntCounter> =
974 0 : Lazy::new(|| {
975 0 : register_int_counter!(
976 0 : "pageserver_initial_logical_size_timelines_where_walreceiver_got_approximate_size",
977 0 : "Counter for the following event: walreceiver calls\
978 0 : Timeline::get_current_logical_size() and it returns `Approximate` for the first time."
979 0 : )
980 0 : .unwrap()
981 0 : });
982 : }
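
// Editorial lifecycle sketch (not part of the original source): the guard pair above is
// expected to be used roughly like this:
//
//     let guard = initial_logical_size::START_CALCULATION
//         .first(initial_logical_size::StartCircumstances::SkippedConcurrencyLimiter);
//     // ... compute the initial logical size ...
//     // On success, convert the guard. Dropping `guard` without doing so bumps
//     // pageserver_initial_logical_size_drop_calculation instead.
//     let finished = guard.calculation_result_saved();
//     // Dropping `finished` later bumps the `deaths` counter, so births - deaths acts
//     // as a gauge of live results.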
983 :
984 0 : static DIRECTORY_ENTRIES_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
985 0 : register_uint_gauge_vec!(
986 0 : "pageserver_directory_entries_count",
987 0 : "Sum of the entries in pageserver-stored directory listings",
988 0 : &["tenant_id", "shard_id", "timeline_id"]
989 0 : )
990 0 : .expect("failed to define a metric")
991 0 : });
992 :
993 428 : pub(crate) static TENANT_STATE_METRIC: Lazy<UIntGaugeVec> = Lazy::new(|| {
994 428 : register_uint_gauge_vec!(
995 428 : "pageserver_tenant_states_count",
996 428 : "Count of tenants per state",
997 428 : &["state"]
998 428 : )
999 428 : .expect("Failed to register pageserver_tenant_states_count metric")
1000 428 : });
1001 :
1002 : /// A set of broken tenants.
1003 : ///
1004 : /// These are expected to be so rare that a set is fine. Set as in a new timeseries for each broken
1005 : /// tenant.
1006 20 : pub(crate) static BROKEN_TENANTS_SET: Lazy<UIntGaugeVec> = Lazy::new(|| {
1007 20 : register_uint_gauge_vec!(
1008 20 : "pageserver_broken_tenants_count",
1009 20 : "Set of broken tenants",
1010 20 : &["tenant_id", "shard_id"]
1011 20 : )
1012 20 : .expect("Failed to register pageserver_tenant_states_count metric")
1013 20 : });
1014 :
1015 12 : pub(crate) static TENANT_SYNTHETIC_SIZE_METRIC: Lazy<UIntGaugeVec> = Lazy::new(|| {
1016 12 : register_uint_gauge_vec!(
1017 12 : "pageserver_tenant_synthetic_cached_size_bytes",
1018 12 : "Synthetic size of each tenant in bytes",
1019 12 : &["tenant_id"]
1020 12 : )
1021 12 : .expect("Failed to register pageserver_tenant_synthetic_cached_size_bytes metric")
1022 12 : });
1023 :
1024 0 : pub(crate) static EVICTION_ITERATION_DURATION: Lazy<HistogramVec> = Lazy::new(|| {
1025 0 : register_histogram_vec!(
1026 0 : "pageserver_eviction_iteration_duration_seconds_global",
1027 0 : "Time spent on a single eviction iteration",
1028 0 : &["period_secs", "threshold_secs"],
1029 0 : STORAGE_OP_BUCKETS.into(),
1030 0 : )
1031 0 : .expect("failed to define a metric")
1032 0 : });
1033 :
1034 424 : static EVICTIONS: Lazy<IntCounterVec> = Lazy::new(|| {
1035 424 : register_int_counter_vec!(
1036 424 : "pageserver_evictions",
1037 424 : "Number of layers evicted from the pageserver",
1038 424 : &["tenant_id", "shard_id", "timeline_id"]
1039 424 : )
1040 424 : .expect("failed to define a metric")
1041 424 : });
1042 :
1043 424 : static EVICTIONS_WITH_LOW_RESIDENCE_DURATION: Lazy<IntCounterVec> = Lazy::new(|| {
1044 424 : register_int_counter_vec!(
1045 424 : "pageserver_evictions_with_low_residence_duration",
1046 424 : "If a layer is evicted that was resident for less than `low_threshold`, it is counted to this counter. \
1047 424 : Residence duration is determined using the `residence_duration_data_source`.",
1048 424 : &["tenant_id", "shard_id", "timeline_id", "residence_duration_data_source", "low_threshold_secs"]
1049 424 : )
1050 424 : .expect("failed to define a metric")
1051 424 : });
1052 :
1053 0 : pub(crate) static UNEXPECTED_ONDEMAND_DOWNLOADS: Lazy<IntCounter> = Lazy::new(|| {
1054 0 : register_int_counter!(
1055 0 : "pageserver_unexpected_ondemand_downloads_count",
1056 0 : "Number of unexpected on-demand downloads. \
1057 0 : We log more context for each increment, so we forgo any labels in this metric.",
1058 0 : )
1059 0 : .expect("failed to define a metric")
1060 0 : });
1061 :
1062 : /// How long did we take to start up? Broken down by labels to describe
1063 : /// different phases of startup.
1064 0 : pub static STARTUP_DURATION: Lazy<GaugeVec> = Lazy::new(|| {
1065 0 : register_gauge_vec!(
1066 0 : "pageserver_startup_duration_seconds",
1067 0 : "Time taken by phases of pageserver startup, in seconds",
1068 0 : &["phase"]
1069 0 : )
1070 0 : .expect("Failed to register pageserver_startup_duration_seconds metric")
1071 0 : });
1072 :
1073 0 : pub static STARTUP_IS_LOADING: Lazy<UIntGauge> = Lazy::new(|| {
1074 0 : register_uint_gauge!(
1075 0 : "pageserver_startup_is_loading",
1076 0 : "1 while in initial startup load of tenants, 0 at other times"
1077 0 : )
1078 0 : .expect("Failed to register pageserver_startup_is_loading")
1079 0 : });
1080 :
1081 416 : pub(crate) static TIMELINE_EPHEMERAL_BYTES: Lazy<UIntGauge> = Lazy::new(|| {
1082 416 : register_uint_gauge!(
1083 416 : "pageserver_timeline_ephemeral_bytes",
1084 416 : "Total number of bytes in ephemeral layers, summed for all timelines. Approximate, lazily updated."
1085 416 : )
1086 416 : .expect("Failed to register metric")
1087 416 : });
1088 :
1089 : /// Metrics related to the lifecycle of a [`crate::tenant::Tenant`] object: things
1090 : /// like how long it took to load.
1091 : ///
1092 : /// Note that these are process-global metrics, _not_ per-tenant metrics. Per-tenant
1093 : /// metrics are rather expensive, and usually fine grained stuff makes more sense
1094 : /// at a timeline level than tenant level.
1095 : pub(crate) struct TenantMetrics {
1096 : /// How long did tenants take to go from construction to active state?
1097 : pub(crate) activation: Histogram,
1098 : pub(crate) preload: Histogram,
1099 : pub(crate) attach: Histogram,
1100 :
1101 : /// How many tenants are included in the initial startup of the pagesrever?
1102 : pub(crate) startup_scheduled: IntCounter,
1103 : pub(crate) startup_complete: IntCounter,
1104 : }
1105 :
1106 0 : pub(crate) static TENANT: Lazy<TenantMetrics> = Lazy::new(|| {
1107 0 : TenantMetrics {
1108 0 : activation: register_histogram!(
1109 0 : "pageserver_tenant_activation_seconds",
1110 0 : "Time taken by tenants to activate, in seconds",
1111 0 : CRITICAL_OP_BUCKETS.into()
1112 0 : )
1113 0 : .expect("Failed to register metric"),
1114 0 : preload: register_histogram!(
1115 0 : "pageserver_tenant_preload_seconds",
1116 0 : "Time taken by tenants to load remote metadata on startup/attach, in seconds",
1117 0 : CRITICAL_OP_BUCKETS.into()
1118 0 : )
1119 0 : .expect("Failed to register metric"),
1120 0 : attach: register_histogram!(
1121 0 : "pageserver_tenant_attach_seconds",
1122 0 : "Time taken by tenants to intialize, after remote metadata is already loaded",
1123 0 : CRITICAL_OP_BUCKETS.into()
1124 0 : )
1125 0 : .expect("Failed to register metric"),
1126 0 : startup_scheduled: register_int_counter!(
1127 0 : "pageserver_tenant_startup_scheduled",
1128 0 : "Number of tenants included in pageserver startup (doesn't count tenants attached later)"
1129 0 : ).expect("Failed to register metric"),
1130 0 : startup_complete: register_int_counter!(
1131 0 : "pageserver_tenant_startup_complete",
1132 0 : "Number of tenants that have completed warm-up, or activated on-demand during initial startup: \
1133 0 : should eventually reach `pageserver_tenant_startup_scheduled_total`. Does not include broken \
1134 0 : tenants: such cases will lead to this metric never reaching the scheduled count."
1135 0 : ).expect("Failed to register metric"),
1136 0 : }
1137 0 : });
1138 :
1139 : /// Each `Timeline`'s [`EVICTIONS_WITH_LOW_RESIDENCE_DURATION`] metric.
1140 : #[derive(Debug)]
1141 : pub(crate) struct EvictionsWithLowResidenceDuration {
1142 : data_source: &'static str,
1143 : threshold: Duration,
1144 : counter: Option<IntCounter>,
1145 : }
1146 :
1147 : pub(crate) struct EvictionsWithLowResidenceDurationBuilder {
1148 : data_source: &'static str,
1149 : threshold: Duration,
1150 : }
1151 :
1152 : impl EvictionsWithLowResidenceDurationBuilder {
1153 928 : pub fn new(data_source: &'static str, threshold: Duration) -> Self {
1154 928 : Self {
1155 928 : data_source,
1156 928 : threshold,
1157 928 : }
1158 928 : }
1159 :
1160 928 : fn build(
1161 928 : &self,
1162 928 : tenant_id: &str,
1163 928 : shard_id: &str,
1164 928 : timeline_id: &str,
1165 928 : ) -> EvictionsWithLowResidenceDuration {
1166 928 : let counter = EVICTIONS_WITH_LOW_RESIDENCE_DURATION
1167 928 : .get_metric_with_label_values(&[
1168 928 : tenant_id,
1169 928 : shard_id,
1170 928 : timeline_id,
1171 928 : self.data_source,
1172 928 : &EvictionsWithLowResidenceDuration::threshold_label_value(self.threshold),
1173 928 : ])
1174 928 : .unwrap();
1175 928 : EvictionsWithLowResidenceDuration {
1176 928 : data_source: self.data_source,
1177 928 : threshold: self.threshold,
1178 928 : counter: Some(counter),
1179 928 : }
1180 928 : }
1181 : }
1182 :
1183 : impl EvictionsWithLowResidenceDuration {
1184 948 : fn threshold_label_value(threshold: Duration) -> String {
1185 948 : format!("{}", threshold.as_secs())
1186 948 : }
1187 :
1188 8 : pub fn observe(&self, observed_value: Duration) {
1189 8 : if observed_value < self.threshold {
1190 8 : self.counter
1191 8 : .as_ref()
1192 8 : .expect("nobody calls this function after `remove_from_vec`")
1193 8 : .inc();
1194 8 : }
1195 8 : }
1196 :
1197 0 : pub fn change_threshold(
1198 0 : &mut self,
1199 0 : tenant_id: &str,
1200 0 : shard_id: &str,
1201 0 : timeline_id: &str,
1202 0 : new_threshold: Duration,
1203 0 : ) {
1204 0 : if new_threshold == self.threshold {
1205 0 : return;
1206 0 : }
1207 0 : let mut with_new = EvictionsWithLowResidenceDurationBuilder::new(
1208 0 : self.data_source,
1209 0 : new_threshold,
1210 0 : )
1211 0 : .build(tenant_id, shard_id, timeline_id);
1212 0 : std::mem::swap(self, &mut with_new);
1213 0 : with_new.remove(tenant_id, shard_id, timeline_id);
1214 0 : }
1215 :
1216 : // This could be a `Drop` impl, but we need the `tenant_id` and `timeline_id`.
1217 20 : fn remove(&mut self, tenant_id: &str, shard_id: &str, timeline_id: &str) {
1218 20 : let Some(_counter) = self.counter.take() else {
1219 0 : return;
1220 : };
1221 :
1222 20 : let threshold = Self::threshold_label_value(self.threshold);
1223 20 :
1224 20 : let removed = EVICTIONS_WITH_LOW_RESIDENCE_DURATION.remove_label_values(&[
1225 20 : tenant_id,
1226 20 : shard_id,
1227 20 : timeline_id,
1228 20 : self.data_source,
1229 20 : &threshold,
1230 20 : ]);
1231 20 :
1232 20 : match removed {
1233 0 : Err(e) => {
1234 0 : // this has been hit in staging as
1235 0 : // <https://neondatabase.sentry.io/issues/4142396994/>, but we don't know how.
1236 0 : // because we can be in the drop path already, don't risk:
1237 0 : // - "double-panic => illegal instruction" or
1238 0 : // - future "drop panick => abort"
1239 0 : //
1240 0 : // so just nag: (the error has the labels)
1241 0 : tracing::warn!(
1242 0 : "failed to remove EvictionsWithLowResidenceDuration, it was already removed? {e:#?}"
1243 : );
1244 : }
1245 : Ok(()) => {
1246 : // to help identify cases where we double-remove the same values, let's log all
1247 : // deletions?
1248 20 : tracing::info!(
1249 0 : "removed EvictionsWithLowResidenceDuration with {tenant_id}, {timeline_id}, {}, {threshold}",
1250 : self.data_source
1251 : );
1252 : }
1253 : }
1254 20 : }
1255 : }
1256 :
1257 : // Metrics collected on disk IO operations
1258 : //
1259 : // Roughly logarithmic scale.
1260 : const STORAGE_IO_TIME_BUCKETS: &[f64] = &[
1261 : 0.000030, // 30 usec
1262 : 0.001000, // 1000 usec
1263 : 0.030, // 30 ms
1264 : 1.000, // 1000 ms
1265 : 30.000, // 30000 ms
1266 : ];
1267 :
1268 : /// VirtualFile fs operation variants.
1269 : ///
1270 : /// Operations:
1271 : /// - open ([`std::fs::OpenOptions::open`])
1272 : /// - close (dropping [`crate::virtual_file::VirtualFile`])
1273 : /// - close-by-replace (close by replacement algorithm)
1274 : /// - read (`read_at`)
1275 : /// - write (`write_at`)
1276 : /// - seek (modify internal position or file length query)
1277 : /// - fsync ([`std::fs::File::sync_all`])
1278 : /// - metadata ([`std::fs::File::metadata`])
1279 : #[derive(
1280 0 : Debug, Clone, Copy, strum_macros::EnumCount, strum_macros::EnumIter, strum_macros::FromRepr,
1281 : )]
1282 : pub(crate) enum StorageIoOperation {
1283 : Open,
1284 : OpenAfterReplace,
1285 : Close,
1286 : CloseByReplace,
1287 : Read,
1288 : Write,
1289 : Seek,
1290 : Fsync,
1291 : Metadata,
1292 : }
1293 :
1294 : impl StorageIoOperation {
1295 4356 : pub fn as_str(&self) -> &'static str {
1296 4356 : match self {
1297 484 : StorageIoOperation::Open => "open",
1298 484 : StorageIoOperation::OpenAfterReplace => "open-after-replace",
1299 484 : StorageIoOperation::Close => "close",
1300 484 : StorageIoOperation::CloseByReplace => "close-by-replace",
1301 484 : StorageIoOperation::Read => "read",
1302 484 : StorageIoOperation::Write => "write",
1303 484 : StorageIoOperation::Seek => "seek",
1304 484 : StorageIoOperation::Fsync => "fsync",
1305 484 : StorageIoOperation::Metadata => "metadata",
1306 : }
1307 4356 : }
1308 : }
1309 :
1310 : /// Tracks time taken by fs operations near VirtualFile.
1311 : #[derive(Debug)]
1312 : pub(crate) struct StorageIoTime {
1313 : metrics: [Histogram; StorageIoOperation::COUNT],
1314 : }
1315 :
1316 : impl StorageIoTime {
1317 484 : fn new() -> Self {
1318 484 : let storage_io_histogram_vec = register_histogram_vec!(
1319 484 : "pageserver_io_operations_seconds",
1320 484 : "Time spent in IO operations",
1321 484 : &["operation"],
1322 484 : STORAGE_IO_TIME_BUCKETS.into()
1323 484 : )
1324 484 : .expect("failed to define a metric");
1325 4356 : let metrics = std::array::from_fn(|i| {
1326 4356 : let op = StorageIoOperation::from_repr(i).unwrap();
1327 4356 : storage_io_histogram_vec
1328 4356 : .get_metric_with_label_values(&[op.as_str()])
1329 4356 : .unwrap()
1330 4356 : });
1331 484 : Self { metrics }
1332 484 : }
1333 :
1334 2190676 : pub(crate) fn get(&self, op: StorageIoOperation) -> &Histogram {
1335 2190676 : &self.metrics[op as usize]
1336 2190676 : }
1337 : }
1338 :
1339 : pub(crate) static STORAGE_IO_TIME_METRIC: Lazy<StorageIoTime> = Lazy::new(StorageIoTime::new);
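
// Editorial usage sketch (not part of the original source): VirtualFile-adjacent code is
// expected to time each fs operation and record it against the matching variant, e.g.:
//
//     let start = Instant::now();
//     // ... perform the read ...
//     STORAGE_IO_TIME_METRIC
//         .get(StorageIoOperation::Read)
//         .observe(start.elapsed().as_secs_f64());
//
// `get()` is a plain array index by discriminant, so the hot path avoids any label lookup.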
1340 :
1341 : #[derive(Clone, Copy)]
1342 : #[repr(usize)]
1343 : pub(crate) enum StorageIoSizeOperation {
1344 : Read,
1345 : Write,
1346 : }
1347 :
1348 : impl StorageIoSizeOperation {
1349 : pub(crate) const VARIANTS: &'static [&'static str] = &["read", "write"];
1350 :
1351 2976 : fn as_str(&self) -> &'static str {
1352 2976 : Self::VARIANTS[*self as usize]
1353 2976 : }
1354 : }
1355 :
1356 : // Needed for the https://neonprod.grafana.net/d/5uK9tHL4k/picking-tenant-for-relocation?orgId=1
1357 560 : pub(crate) static STORAGE_IO_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
1358 560 : register_uint_gauge_vec!(
1359 560 : "pageserver_io_operations_bytes_total",
1360 560 : "Total amount of bytes read/written in IO operations",
1361 560 : &["operation", "tenant_id", "shard_id", "timeline_id"]
1362 560 : )
1363 560 : .expect("failed to define a metric")
1364 560 : });
1365 :
1366 : #[derive(Clone, Debug)]
1367 : pub(crate) struct StorageIoSizeMetrics {
1368 : pub read: UIntGauge,
1369 : pub write: UIntGauge,
1370 : }
1371 :
1372 : impl StorageIoSizeMetrics {
1373 1488 : pub(crate) fn new(tenant_id: &str, shard_id: &str, timeline_id: &str) -> Self {
1374 1488 : let read = STORAGE_IO_SIZE
1375 1488 : .get_metric_with_label_values(&[
1376 1488 : StorageIoSizeOperation::Read.as_str(),
1377 1488 : tenant_id,
1378 1488 : shard_id,
1379 1488 : timeline_id,
1380 1488 : ])
1381 1488 : .unwrap();
1382 1488 : let write = STORAGE_IO_SIZE
1383 1488 : .get_metric_with_label_values(&[
1384 1488 : StorageIoSizeOperation::Write.as_str(),
1385 1488 : tenant_id,
1386 1488 : shard_id,
1387 1488 : timeline_id,
1388 1488 : ])
1389 1488 : .unwrap();
1390 1488 : Self { read, write }
1391 1488 : }
1392 : }
1393 :
1394 : #[cfg(not(test))]
1395 : pub(crate) mod virtual_file_descriptor_cache {
1396 : use super::*;
1397 :
1398 0 : pub(crate) static SIZE_MAX: Lazy<UIntGauge> = Lazy::new(|| {
1399 0 : register_uint_gauge!(
1400 0 : "pageserver_virtual_file_descriptor_cache_size_max",
1401 0 : "Maximum number of open file descriptors in the cache."
1402 0 : )
1403 0 : .unwrap()
1404 0 : });
1405 :
1406 : // SIZE_CURRENT: derive it like so:
1407 : // ```
1408 : // sum(pageserver_io_operations_seconds_count{operation=~"^(open|open-after-replace)$"})
1409 : // - ignoring(operation)
1410 : // sum(pageserver_io_operations_seconds_count{operation=~"^(close|close-by-replace)$"})
1411 : // ```
1412 : }
1413 :
1414 : #[cfg(not(test))]
1415 : pub(crate) mod virtual_file_io_engine {
1416 : use super::*;
1417 :
1418 0 : pub(crate) static KIND: Lazy<UIntGaugeVec> = Lazy::new(|| {
1419 0 : register_uint_gauge_vec!(
1420 0 : "pageserver_virtual_file_io_engine_kind",
1421 0 : "The configured io engine for VirtualFile",
1422 0 : &["kind"],
1423 0 : )
1424 0 : .unwrap()
1425 0 : });
1426 : }
1427 :
1428 : pub(crate) struct SmgrOpTimer(Option<SmgrOpTimerInner>);
1429 : pub(crate) struct SmgrOpTimerInner {
1430 : global_execution_latency_histo: Histogram,
1431 : per_timeline_execution_latency_histo: Option<Histogram>,
1432 :
1433 : global_batch_wait_time: Histogram,
1434 : per_timeline_batch_wait_time: Histogram,
1435 :
1436 : global_flush_in_progress_micros: IntCounter,
1437 : per_timeline_flush_in_progress_micros: IntCounter,
1438 :
1439 : throttling: Arc<tenant_throttling::Pagestream>,
1440 :
1441 : timings: SmgrOpTimerState,
1442 : }
1443 :
1444 : /// The stages of request processing are represented by the enum variants.
1445 : /// Used as part of [`SmgrOpTimerInner::timings`].
1446 : ///
1447 : /// Request processing calls into the `SmgrOpTimer::observe_*` methods at the
1448 : /// transition points.
1449 : /// These methods bump relevant counters and then update [`SmgrOpTimerInner::timings`]
1450 : /// to the next state.
1451 : ///
1452 : /// Each request goes through every stage, in all configurations.
1453 : ///
1454 : #[derive(Debug)]
1455 : enum SmgrOpTimerState {
1456 : Received {
1457 : // In the future, we may want to track the full time the request spent
1458 : // inside pageserver process (time spent in kernel buffers can't be tracked).
1459 : // `received_at` would be used for that.
1460 : #[allow(dead_code)]
1461 : received_at: Instant,
1462 : },
1463 : Throttling {
1464 : throttle_started_at: Instant,
1465 : },
1466 : Batching {
1467 : throttle_done_at: Instant,
1468 : },
1469 : Executing {
1470 : execution_started_at: Instant,
1471 : },
1472 : Flushing,
1473 : // NB: when adding observation points, remember to update the Drop impl.
1474 : }
1475 :
1476 : // NB: when adding observation points, remember to update the Drop impl.
1477 : impl SmgrOpTimer {
1478 : /// See [`SmgrOpTimerState`] for more context.
1479 0 : pub(crate) fn observe_throttle_start(&mut self, at: Instant) {
1480 0 : let Some(inner) = self.0.as_mut() else {
1481 0 : return;
1482 : };
1483 0 : let SmgrOpTimerState::Received { received_at: _ } = &mut inner.timings else {
1484 0 : return;
1485 : };
1486 0 : inner.throttling.count_accounted_start.inc();
1487 0 : inner.timings = SmgrOpTimerState::Throttling {
1488 0 : throttle_started_at: at,
1489 0 : };
1490 0 : }
1491 :
1492 : /// See [`SmgrOpTimerState`] for more context.
1493 0 : pub(crate) fn observe_throttle_done(&mut self, throttle: ThrottleResult) {
1494 0 : let Some(inner) = self.0.as_mut() else {
1495 0 : return;
1496 : };
1497 : let SmgrOpTimerState::Throttling {
1498 0 : throttle_started_at,
1499 0 : } = &inner.timings
1500 : else {
1501 0 : return;
1502 : };
1503 0 : inner.throttling.count_accounted_finish.inc();
1504 0 : match throttle {
1505 0 : ThrottleResult::NotThrottled { end } => {
1506 0 : inner.timings = SmgrOpTimerState::Batching {
1507 0 : throttle_done_at: end,
1508 0 : };
1509 0 : }
1510 0 : ThrottleResult::Throttled { end } => {
1511 0 : // update metrics
1512 0 : inner.throttling.count_throttled.inc();
1513 0 : inner
1514 0 : .throttling
1515 0 : .wait_time
1516 0 : .inc_by((end - *throttle_started_at).as_micros().try_into().unwrap());
1517 0 : // state transition
1518 0 : inner.timings = SmgrOpTimerState::Batching {
1519 0 : throttle_done_at: end,
1520 0 : };
1521 0 : }
1522 : }
1523 0 : }
1524 :
1525 : /// See [`SmgrOpTimerState`] for more context.
1526 0 : pub(crate) fn observe_execution_start(&mut self, at: Instant) {
1527 0 : let Some(inner) = self.0.as_mut() else {
1528 0 : return;
1529 : };
1530 0 : let SmgrOpTimerState::Batching { throttle_done_at } = &inner.timings else {
1531 0 : return;
1532 : };
1533 : // update metrics
1534 0 : let batch = at - *throttle_done_at;
1535 0 : inner.global_batch_wait_time.observe(batch.as_secs_f64());
1536 0 : inner
1537 0 : .per_timeline_batch_wait_time
1538 0 : .observe(batch.as_secs_f64());
1539 0 : // state transition
1540 0 : inner.timings = SmgrOpTimerState::Executing {
1541 0 : execution_started_at: at,
1542 0 : }
1543 0 : }
1544 :
1545 : /// For all but the first caller, this is a no-op.
1546 :     /// The first caller receives Some, subsequent ones None.
1547 : ///
1548 : /// See [`SmgrOpTimerState`] for more context.
1549 0 : pub(crate) fn observe_execution_end(&mut self, at: Instant) -> Option<SmgrOpFlushInProgress> {
1550 : // NB: unlike the other observe_* methods, this one take()s.
1551 : #[allow(clippy::question_mark)] // maintain similar code pattern.
1552 0 : let Some(mut inner) = self.0.take() else {
1553 0 : return None;
1554 : };
1555 : let SmgrOpTimerState::Executing {
1556 0 : execution_started_at,
1557 0 : } = &inner.timings
1558 : else {
1559 0 : return None;
1560 : };
1561 : // update metrics
1562 0 : let execution = at - *execution_started_at;
1563 0 : inner
1564 0 : .global_execution_latency_histo
1565 0 : .observe(execution.as_secs_f64());
1566 0 : if let Some(per_timeline_execution_latency_histo) =
1567 0 : &inner.per_timeline_execution_latency_histo
1568 0 : {
1569 0 : per_timeline_execution_latency_histo.observe(execution.as_secs_f64());
1570 0 : }
1571 :
1572 : // state transition
1573 0 : inner.timings = SmgrOpTimerState::Flushing;
1574 0 :
1575 0 : // return the flush in progress object which
1576 0 : // will do the remaining metrics updates
1577 0 : let SmgrOpTimerInner {
1578 0 : global_flush_in_progress_micros,
1579 0 : per_timeline_flush_in_progress_micros,
1580 0 : ..
1581 0 : } = inner;
1582 0 : Some(SmgrOpFlushInProgress {
1583 0 : global_micros: global_flush_in_progress_micros,
1584 0 : per_timeline_micros: per_timeline_flush_in_progress_micros,
1585 0 : })
1586 0 : }
1587 : }
1588 :
1589 : /// The last stage of request processing is serializing and flushing the response
1590 : /// into the TCP connection. We want to make slow flushes observable
1591 : /// _while they are occurring_, so this struct provides a wrapper method [`Self::measure`]
1592 : /// to periodically bump the metric.
1593 : ///
1594 : /// If in the future we decide that we're not interested in live updates, we can
1595 : /// add another `observe_*` method to [`SmgrOpTimer`], follow the existing pattern there,
1596 : /// and remove this struct from the code base.
1597 : pub(crate) struct SmgrOpFlushInProgress {
1598 : global_micros: IntCounter,
1599 : per_timeline_micros: IntCounter,
1600 : }
1601 :
1602 : impl Drop for SmgrOpTimer {
1603 0 : fn drop(&mut self) {
1604 0 : // In case of early drop, update any of the remaining metrics with
1605 0 : // observations so that (started,finished) counter pairs balance out
1606 0 :         // and all counters on the latency path have the same number of
1607 0 : // observations.
1608 0 : // It's technically lying and it would be better if each metric had
1609 0 : // a separate label or similar for cancelled requests.
1610 0 : // But we don't have that right now and counter pairs balancing
1611 0 : // out is useful when using the metrics in panels and whatnot.
1612 0 : let now = Instant::now();
1613 0 : self.observe_throttle_start(now);
1614 0 : self.observe_throttle_done(ThrottleResult::NotThrottled { end: now });
1615 0 : self.observe_execution_start(now);
1616 0 : let maybe_flush_timer = self.observe_execution_end(now);
1617 0 : drop(maybe_flush_timer);
1618 0 : }
1619 : }
1620 :
1621 : impl SmgrOpFlushInProgress {
1622 :     /// The caller must guarantee that `socket_fd` outlives this function.
1623 0 : pub(crate) async fn measure<Fut, O>(
1624 0 : self,
1625 0 : started_at: Instant,
1626 0 : mut fut: Fut,
1627 0 : socket_fd: RawFd,
1628 0 : ) -> O
1629 0 : where
1630 0 : Fut: std::future::Future<Output = O>,
1631 0 : {
1632 0 : let mut fut = std::pin::pin!(fut);
1633 0 :
1634 0 : let mut logged = false;
1635 0 : let mut last_counter_increment_at = started_at;
1636 0 : let mut observe_guard = scopeguard::guard(
1637 0 : |is_timeout| {
1638 0 : let now = Instant::now();
1639 0 :
1640 0 : // Increment counter
1641 0 : {
1642 0 : let elapsed_since_last_observe = now - last_counter_increment_at;
1643 0 : self.global_micros
1644 0 : .inc_by(u64::try_from(elapsed_since_last_observe.as_micros()).unwrap());
1645 0 : self.per_timeline_micros
1646 0 : .inc_by(u64::try_from(elapsed_since_last_observe.as_micros()).unwrap());
1647 0 : last_counter_increment_at = now;
1648 0 : }
1649 0 :
1650 0 :                 // Log on every timeout, and also on completion, but only if we hit at least one timeout.
1651 0 : if is_timeout || logged {
1652 0 : logged = true;
1653 0 : let elapsed_total = now - started_at;
1654 0 : let msg = if is_timeout {
1655 0 : "slow flush ongoing"
1656 : } else {
1657 0 : "slow flush completed or cancelled"
1658 : };
1659 :
1660 0 : let (inq, outq) = {
1661 0 : // SAFETY: caller guarantees that `socket_fd` outlives this function.
1662 0 : #[cfg(target_os = "linux")]
1663 0 : unsafe {
1664 0 : (
1665 0 : utils::linux_socket_ioctl::inq(socket_fd).unwrap_or(-2),
1666 0 : utils::linux_socket_ioctl::outq(socket_fd).unwrap_or(-2),
1667 0 : )
1668 0 : }
1669 0 : #[cfg(not(target_os = "linux"))]
1670 0 : {
1671 0 : _ = socket_fd; // appease unused lint on macOS
1672 0 : (-1, -1)
1673 0 : }
1674 0 : };
1675 0 :
1676 0 : let elapsed_total_secs = format!("{:.6}", elapsed_total.as_secs_f64());
1677 0 : tracing::info!(elapsed_total_secs, inq, outq, msg);
1678 0 : }
1679 0 : },
1680 0 : |mut observe| {
1681 0 : observe(false);
1682 0 : },
1683 0 : );
1684 :
1685 : loop {
1686 0 : match tokio::time::timeout(Duration::from_secs(10), &mut fut).await {
1687 0 : Ok(v) => return v,
1688 0 : Err(_timeout) => {
1689 0 : (*observe_guard)(true);
1690 0 : }
1691 : }
1692 : }
1693 0 : }
1694 : }
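// Illustrative sketch of how `measure` is meant to be used. `flush_fut` and `socket_fd`
// are hypothetical stand-ins for the page service's response flush future and the raw fd
// of its TCP socket; the point is that the entire flush is awaited through `measure`,
// which re-arms a 10s timeout around it and keeps the in-progress counters ticking while
// the flush is slow:
//
//     if let Some(flush_timer) = timer.observe_execution_end(Instant::now()) {
//         let started_at = Instant::now();
//         flush_timer.measure(started_at, flush_fut, socket_fd).await;
//     }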
1695 :
1696 : #[derive(
1697 : Debug,
1698 : Clone,
1699 : Copy,
1700 : IntoStaticStr,
1701 : strum_macros::EnumCount,
1702 0 : strum_macros::EnumIter,
1703 : strum_macros::FromRepr,
1704 : enum_map::Enum,
1705 : )]
1706 : #[strum(serialize_all = "snake_case")]
1707 : pub enum SmgrQueryType {
1708 : GetRelExists,
1709 : GetRelSize,
1710 : GetPageAtLsn,
1711 : GetDbSize,
1712 : GetSlruSegment,
1713 : #[cfg(feature = "testing")]
1714 : Test,
1715 : }
1716 :
1717 : #[derive(
1718 : Debug,
1719 : Clone,
1720 : Copy,
1721 : IntoStaticStr,
1722 : strum_macros::EnumCount,
1723 180 : strum_macros::EnumIter,
1724 : strum_macros::FromRepr,
1725 : enum_map::Enum,
1726 : )]
1727 : #[strum(serialize_all = "snake_case")]
1728 : pub enum GetPageBatchBreakReason {
1729 : BatchFull,
1730 : NonBatchableRequest,
1731 : NonUniformLsn,
1732 : SamePageAtDifferentLsn,
1733 : NonUniformTimeline,
1734 : ExecutorSteal,
1735 : #[cfg(feature = "testing")]
1736 : NonUniformKey,
1737 : }
1738 :
1739 : pub(crate) struct SmgrQueryTimePerTimeline {
1740 : global_started: [IntCounter; SmgrQueryType::COUNT],
1741 : global_latency: [Histogram; SmgrQueryType::COUNT],
1742 : per_timeline_getpage_started: IntCounter,
1743 : per_timeline_getpage_latency: Histogram,
1744 : global_batch_size: Histogram,
1745 : per_timeline_batch_size: Histogram,
1746 : global_flush_in_progress_micros: IntCounter,
1747 : per_timeline_flush_in_progress_micros: IntCounter,
1748 : global_batch_wait_time: Histogram,
1749 : per_timeline_batch_wait_time: Histogram,
1750 : global_batch_break_reason: [IntCounter; GetPageBatchBreakReason::COUNT],
1751 : per_timeline_batch_break_reason: GetPageBatchBreakReasonTimelineMetrics,
1752 : throttling: Arc<tenant_throttling::Pagestream>,
1753 : }
1754 :
1755 424 : static SMGR_QUERY_STARTED_GLOBAL: Lazy<IntCounterVec> = Lazy::new(|| {
1756 424 : register_int_counter_vec!(
1757 424 :         // it's a counter, but the name is prepared for a future extension to a histogram of queue depth
1758 424 : "pageserver_smgr_query_started_global_count",
1759 424 : "Number of smgr queries started, aggregated by query type.",
1760 424 : &["smgr_query_type"],
1761 424 : )
1762 424 : .expect("failed to define a metric")
1763 424 : });
1764 :
1765 424 : static SMGR_QUERY_STARTED_PER_TENANT_TIMELINE: Lazy<IntCounterVec> = Lazy::new(|| {
1766 424 : register_int_counter_vec!(
1767 424 :         // it's a counter, but the name is prepared for a future extension to a histogram of queue depth
1768 424 : "pageserver_smgr_query_started_count",
1769 424 : "Number of smgr queries started, aggregated by query type and tenant/timeline.",
1770 424 : &["smgr_query_type", "tenant_id", "shard_id", "timeline_id"],
1771 424 : )
1772 424 : .expect("failed to define a metric")
1773 424 : });
1774 :
1775 : // Alias so all histograms recording per-timeline smgr timings use the same buckets.
1776 : static SMGR_QUERY_TIME_PER_TENANT_TIMELINE_BUCKETS: &[f64] = CRITICAL_OP_BUCKETS;
1777 :
1778 424 : static SMGR_QUERY_TIME_PER_TENANT_TIMELINE: Lazy<HistogramVec> = Lazy::new(|| {
1779 424 : register_histogram_vec!(
1780 424 : "pageserver_smgr_query_seconds",
1781 424 : "Time spent _executing_ smgr query handling, excluding batch and throttle delays.",
1782 424 : &["smgr_query_type", "tenant_id", "shard_id", "timeline_id"],
1783 424 : SMGR_QUERY_TIME_PER_TENANT_TIMELINE_BUCKETS.into(),
1784 424 : )
1785 424 : .expect("failed to define a metric")
1786 424 : });
1787 :
1788 424 : static SMGR_QUERY_TIME_GLOBAL_BUCKETS: Lazy<Vec<f64>> = Lazy::new(|| {
1789 424 : [
1790 424 : 1,
1791 424 : 10,
1792 424 : 20,
1793 424 : 40,
1794 424 : 60,
1795 424 : 80,
1796 424 : 100,
1797 424 : 200,
1798 424 : 300,
1799 424 : 400,
1800 424 : 500,
1801 424 : 600,
1802 424 : 700,
1803 424 : 800,
1804 424 : 900,
1805 424 : 1_000, // 1ms
1806 424 : 2_000,
1807 424 : 4_000,
1808 424 : 6_000,
1809 424 : 8_000,
1810 424 : 10_000, // 10ms
1811 424 : 20_000,
1812 424 : 40_000,
1813 424 : 60_000,
1814 424 : 80_000,
1815 424 : 100_000,
1816 424 : 200_000,
1817 424 : 400_000,
1818 424 : 600_000,
1819 424 : 800_000,
1820 424 : 1_000_000, // 1s
1821 424 : 2_000_000,
1822 424 : 4_000_000,
1823 424 : 6_000_000,
1824 424 : 8_000_000,
1825 424 : 10_000_000, // 10s
1826 424 : 20_000_000,
1827 424 : 50_000_000,
1828 424 : 100_000_000,
1829 424 : 200_000_000,
1830 424 : 1_000_000_000, // 1000s
1831 424 : ]
1832 424 : .into_iter()
1833 424 : .map(Duration::from_micros)
1834 17384 : .map(|d| d.as_secs_f64())
1835 424 : .collect()
1836 424 : });
1837 :
1838 424 : static SMGR_QUERY_TIME_GLOBAL: Lazy<HistogramVec> = Lazy::new(|| {
1839 424 : register_histogram_vec!(
1840 424 : "pageserver_smgr_query_seconds_global",
1841 424 : "Like pageserver_smgr_query_seconds, but aggregated to instance level.",
1842 424 : &["smgr_query_type"],
1843 424 : SMGR_QUERY_TIME_GLOBAL_BUCKETS.clone(),
1844 424 : )
1845 424 : .expect("failed to define a metric")
1846 424 : });
1847 :
1848 424 : static PAGE_SERVICE_BATCH_SIZE_BUCKETS_GLOBAL: Lazy<Vec<f64>> = Lazy::new(|| {
1849 424 : (1..=u32::try_from(Timeline::MAX_GET_VECTORED_KEYS).unwrap())
1850 13568 : .map(|v| v.into())
1851 424 : .collect()
1852 424 : });
1853 :
1854 424 : static PAGE_SERVICE_BATCH_SIZE_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
1855 424 : register_histogram!(
1856 424 : "pageserver_page_service_batch_size_global",
1857 424 : "Batch size of pageserver page service requests",
1858 424 : PAGE_SERVICE_BATCH_SIZE_BUCKETS_GLOBAL.clone(),
1859 424 : )
1860 424 : .expect("failed to define a metric")
1861 424 : });
1862 :
1863 424 : static PAGE_SERVICE_BATCH_SIZE_BUCKETS_PER_TIMELINE: Lazy<Vec<f64>> = Lazy::new(|| {
1864 424 : let mut buckets = Vec::new();
1865 2968 : for i in 0.. {
1866 2968 : let bucket = 1 << i;
1867 2968 : if bucket > u32::try_from(Timeline::MAX_GET_VECTORED_KEYS).unwrap() {
1868 424 : break;
1869 2544 : }
1870 2544 : buckets.push(bucket.into());
1871 : }
1872 424 : buckets
1873 424 : });
1874 :
1875 424 : static PAGE_SERVICE_BATCH_SIZE_PER_TENANT_TIMELINE: Lazy<HistogramVec> = Lazy::new(|| {
1876 424 : register_histogram_vec!(
1877 424 : "pageserver_page_service_batch_size",
1878 424 : "Batch size of pageserver page service requests",
1879 424 : &["tenant_id", "shard_id", "timeline_id"],
1880 424 : PAGE_SERVICE_BATCH_SIZE_BUCKETS_PER_TIMELINE.clone()
1881 424 : )
1882 424 : .expect("failed to define a metric")
1883 424 : });
1884 :
1885 424 : static PAGE_SERVICE_BATCH_BREAK_REASON_GLOBAL: Lazy<IntCounterVec> = Lazy::new(|| {
1886 424 : register_int_counter_vec!(
1887 424 : // it's a counter, but, name is prepared to extend it to a histogram of queue depth
1888 424 :         // it's a counter, but the name is prepared for a future extension to a histogram of queue depth
1889 424 : "Reason for breaking batches of get page requests",
1890 424 : &["reason"],
1891 424 : )
1892 424 : .expect("failed to define a metric")
1893 424 : });
1894 :
1895 : struct GetPageBatchBreakReasonTimelineMetrics {
1896 : map: EnumMap<GetPageBatchBreakReason, IntCounter>,
1897 : }
1898 :
1899 : impl GetPageBatchBreakReasonTimelineMetrics {
1900 928 : fn new(tenant_id: &str, shard_slug: &str, timeline_id: &str) -> Self {
1901 928 : GetPageBatchBreakReasonTimelineMetrics {
1902 6496 : map: EnumMap::from_array(std::array::from_fn(|reason_idx| {
1903 6496 : let reason = GetPageBatchBreakReason::from_usize(reason_idx);
1904 6496 : PAGE_SERVICE_BATCH_BREAK_REASON_PER_TENANT_TIMELINE.with_label_values(&[
1905 6496 : tenant_id,
1906 6496 : shard_slug,
1907 6496 : timeline_id,
1908 6496 : reason.into(),
1909 6496 : ])
1910 6496 : })),
1911 928 : }
1912 928 : }
1913 :
1914 0 : fn inc(&self, reason: GetPageBatchBreakReason) {
1915 0 : self.map[reason].inc()
1916 0 : }
1917 : }
1918 :
1919 424 : static PAGE_SERVICE_BATCH_BREAK_REASON_PER_TENANT_TIMELINE: Lazy<IntCounterVec> = Lazy::new(|| {
1920 424 : register_int_counter_vec!(
1921 424 : "pageserver_page_service_batch_break_reason",
1922 424 : "Reason for breaking batches of get page requests",
1923 424 : &["tenant_id", "shard_id", "timeline_id", "reason"],
1924 424 : )
1925 424 : .expect("failed to define a metric")
1926 424 : });
1927 :
1928 0 : pub(crate) static PAGE_SERVICE_CONFIG_MAX_BATCH_SIZE: Lazy<IntGaugeVec> = Lazy::new(|| {
1929 0 : register_int_gauge_vec!(
1930 0 : "pageserver_page_service_config_max_batch_size",
1931 0 : "Configured maximum batch size for the server-side batching functionality of page_service. \
1932 0 : Labels expose more of the configuration parameters.",
1933 0 : &["mode", "execution", "batching"]
1934 0 : )
1935 0 : .expect("failed to define a metric")
1936 0 : });
1937 :
1938 0 : fn set_page_service_config_max_batch_size(conf: &PageServicePipeliningConfig) {
1939 0 : PAGE_SERVICE_CONFIG_MAX_BATCH_SIZE.reset();
1940 0 : let (label_values, value) = match conf {
1941 0 : PageServicePipeliningConfig::Serial => (["serial", "-", "-"], 1),
1942 : PageServicePipeliningConfig::Pipelined(PageServicePipeliningConfigPipelined {
1943 0 : max_batch_size,
1944 0 : execution,
1945 0 : batching,
1946 0 : }) => {
1947 0 : let mode = "pipelined";
1948 0 : let execution = match execution {
1949 : PageServiceProtocolPipelinedExecutionStrategy::ConcurrentFutures => {
1950 0 : "concurrent-futures"
1951 : }
1952 0 : PageServiceProtocolPipelinedExecutionStrategy::Tasks => "tasks",
1953 : };
1954 0 : let batching = match batching {
1955 0 : PageServiceProtocolPipelinedBatchingStrategy::UniformLsn => "uniform-lsn",
1956 0 : PageServiceProtocolPipelinedBatchingStrategy::ScatteredLsn => "scattered-lsn",
1957 : };
1958 :
1959 0 : ([mode, execution, batching], max_batch_size.get())
1960 : }
1961 : };
1962 0 : PAGE_SERVICE_CONFIG_MAX_BATCH_SIZE
1963 0 : .with_label_values(&label_values)
1964 0 : .set(value.try_into().unwrap());
1965 0 : }
1966 :
1967 424 : static PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS: Lazy<IntCounterVec> = Lazy::new(|| {
1968 424 : register_int_counter_vec!(
1969 424 : "pageserver_page_service_pagestream_flush_in_progress_micros",
1970 424 : "Counter that sums up the microseconds that a pagestream response was being flushed into the TCP connection. \
1971 424 : If the flush is particularly slow, this counter will be updated periodically to make slow flushes \
1972 424 : easily discoverable in monitoring. \
1973 424 :         Hence, this is NOT a completion latency histogram.",
1974 424 : &["tenant_id", "shard_id", "timeline_id"],
1975 424 : )
1976 424 : .expect("failed to define a metric")
1977 424 : });
1978 :
1979 424 : static PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS_GLOBAL: Lazy<IntCounter> = Lazy::new(|| {
1980 424 : register_int_counter!(
1981 424 : "pageserver_page_service_pagestream_flush_in_progress_micros_global",
1982 424 : "Like pageserver_page_service_pagestream_flush_in_progress_seconds, but instance-wide.",
1983 424 :         "Like pageserver_page_service_pagestream_flush_in_progress_micros, but instance-wide.",
1984 424 : .expect("failed to define a metric")
1985 424 : });
1986 :
1987 424 : static PAGE_SERVICE_SMGR_BATCH_WAIT_TIME: Lazy<HistogramVec> = Lazy::new(|| {
1988 424 : register_histogram_vec!(
1989 424 : "pageserver_page_service_pagestream_batch_wait_time_seconds",
1990 424 : "Time a request spent waiting in its batch until the batch moved to throttle&execution.",
1991 424 : &["tenant_id", "shard_id", "timeline_id"],
1992 424 : SMGR_QUERY_TIME_PER_TENANT_TIMELINE_BUCKETS.into(),
1993 424 : )
1994 424 : .expect("failed to define a metric")
1995 424 : });
1996 :
1997 424 : static PAGE_SERVICE_SMGR_BATCH_WAIT_TIME_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
1998 424 : register_histogram!(
1999 424 : "pageserver_page_service_pagestream_batch_wait_time_seconds_global",
2000 424 : "Like pageserver_page_service_pagestream_batch_wait_time_seconds, but aggregated to instance level.",
2001 424 : SMGR_QUERY_TIME_GLOBAL_BUCKETS.to_vec(),
2002 424 : )
2003 424 : .expect("failed to define a metric")
2004 424 : });
2005 :
2006 : impl SmgrQueryTimePerTimeline {
2007 928 : pub(crate) fn new(
2008 928 : tenant_shard_id: &TenantShardId,
2009 928 : timeline_id: &TimelineId,
2010 928 : pagestream_throttle_metrics: Arc<tenant_throttling::Pagestream>,
2011 928 : ) -> Self {
2012 928 : let tenant_id = tenant_shard_id.tenant_id.to_string();
2013 928 : let shard_slug = format!("{}", tenant_shard_id.shard_slug());
2014 928 : let timeline_id = timeline_id.to_string();
2015 5568 : let global_started = std::array::from_fn(|i| {
2016 5568 : let op = SmgrQueryType::from_repr(i).unwrap();
2017 5568 : SMGR_QUERY_STARTED_GLOBAL
2018 5568 : .get_metric_with_label_values(&[op.into()])
2019 5568 : .unwrap()
2020 5568 : });
2021 5568 : let global_latency = std::array::from_fn(|i| {
2022 5568 : let op = SmgrQueryType::from_repr(i).unwrap();
2023 5568 : SMGR_QUERY_TIME_GLOBAL
2024 5568 : .get_metric_with_label_values(&[op.into()])
2025 5568 : .unwrap()
2026 5568 : });
2027 928 :
2028 928 : let per_timeline_getpage_started = SMGR_QUERY_STARTED_PER_TENANT_TIMELINE
2029 928 : .get_metric_with_label_values(&[
2030 928 : SmgrQueryType::GetPageAtLsn.into(),
2031 928 : &tenant_id,
2032 928 : &shard_slug,
2033 928 : &timeline_id,
2034 928 : ])
2035 928 : .unwrap();
2036 928 : let per_timeline_getpage_latency = SMGR_QUERY_TIME_PER_TENANT_TIMELINE
2037 928 : .get_metric_with_label_values(&[
2038 928 : SmgrQueryType::GetPageAtLsn.into(),
2039 928 : &tenant_id,
2040 928 : &shard_slug,
2041 928 : &timeline_id,
2042 928 : ])
2043 928 : .unwrap();
2044 928 :
2045 928 : let global_batch_size = PAGE_SERVICE_BATCH_SIZE_GLOBAL.clone();
2046 928 : let per_timeline_batch_size = PAGE_SERVICE_BATCH_SIZE_PER_TENANT_TIMELINE
2047 928 : .get_metric_with_label_values(&[&tenant_id, &shard_slug, &timeline_id])
2048 928 : .unwrap();
2049 928 :
2050 928 : let global_batch_wait_time = PAGE_SERVICE_SMGR_BATCH_WAIT_TIME_GLOBAL.clone();
2051 928 : let per_timeline_batch_wait_time = PAGE_SERVICE_SMGR_BATCH_WAIT_TIME
2052 928 : .get_metric_with_label_values(&[&tenant_id, &shard_slug, &timeline_id])
2053 928 : .unwrap();
2054 928 :
2055 6496 : let global_batch_break_reason = std::array::from_fn(|i| {
2056 6496 : let reason = GetPageBatchBreakReason::from_usize(i);
2057 6496 : PAGE_SERVICE_BATCH_BREAK_REASON_GLOBAL
2058 6496 : .get_metric_with_label_values(&[reason.into()])
2059 6496 : .unwrap()
2060 6496 : });
2061 928 : let per_timeline_batch_break_reason =
2062 928 : GetPageBatchBreakReasonTimelineMetrics::new(&tenant_id, &shard_slug, &timeline_id);
2063 928 :
2064 928 : let global_flush_in_progress_micros =
2065 928 : PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS_GLOBAL.clone();
2066 928 : let per_timeline_flush_in_progress_micros = PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS
2067 928 : .get_metric_with_label_values(&[&tenant_id, &shard_slug, &timeline_id])
2068 928 : .unwrap();
2069 928 :
2070 928 : Self {
2071 928 : global_started,
2072 928 : global_latency,
2073 928 : per_timeline_getpage_latency,
2074 928 : per_timeline_getpage_started,
2075 928 : global_batch_size,
2076 928 : per_timeline_batch_size,
2077 928 : global_flush_in_progress_micros,
2078 928 : per_timeline_flush_in_progress_micros,
2079 928 : global_batch_wait_time,
2080 928 : per_timeline_batch_wait_time,
2081 928 : global_batch_break_reason,
2082 928 : per_timeline_batch_break_reason,
2083 928 : throttling: pagestream_throttle_metrics,
2084 928 : }
2085 928 : }
2086 0 : pub(crate) fn start_smgr_op(&self, op: SmgrQueryType, received_at: Instant) -> SmgrOpTimer {
2087 0 : self.global_started[op as usize].inc();
2088 :
2089 0 : let per_timeline_latency_histo = if matches!(op, SmgrQueryType::GetPageAtLsn) {
2090 0 : self.per_timeline_getpage_started.inc();
2091 0 : Some(self.per_timeline_getpage_latency.clone())
2092 : } else {
2093 0 : None
2094 : };
2095 :
2096 0 : SmgrOpTimer(Some(SmgrOpTimerInner {
2097 0 : global_execution_latency_histo: self.global_latency[op as usize].clone(),
2098 0 : per_timeline_execution_latency_histo: per_timeline_latency_histo,
2099 0 : global_flush_in_progress_micros: self.global_flush_in_progress_micros.clone(),
2100 0 : per_timeline_flush_in_progress_micros: self
2101 0 : .per_timeline_flush_in_progress_micros
2102 0 : .clone(),
2103 0 : global_batch_wait_time: self.global_batch_wait_time.clone(),
2104 0 : per_timeline_batch_wait_time: self.per_timeline_batch_wait_time.clone(),
2105 0 : throttling: self.throttling.clone(),
2106 0 : timings: SmgrOpTimerState::Received { received_at },
2107 0 : }))
2108 0 : }
2109 :
2110 : /// TODO: do something about this? seems odd, we have a similar call on SmgrOpTimer
2111 0 : pub(crate) fn observe_getpage_batch_start(
2112 0 : &self,
2113 0 : batch_size: usize,
2114 0 : break_reason: GetPageBatchBreakReason,
2115 0 : ) {
2116 0 : self.global_batch_size.observe(batch_size as f64);
2117 0 : self.per_timeline_batch_size.observe(batch_size as f64);
2118 0 :
2119 0 : self.global_batch_break_reason[break_reason.into_usize()].inc();
2120 0 : self.per_timeline_batch_break_reason.inc(break_reason);
2121 0 : }
2122 : }
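// Illustrative sketch (the batching logic itself lives in page_service, not here, and
// `batch` is a hypothetical collection of queued requests): when the read path closes a
// batch, it records the batch size and the reason the batch was broken before executing
// the requests it contains.
//
//     metrics.observe_getpage_batch_start(batch.len(), GetPageBatchBreakReason::BatchFull);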
2123 :
2124 : // Keep in sync with the control plane Go code so that we can validate
2125 : // compute's basebackup_ms metric against our perspective in the context of SLI/SLO.
2126 0 : static COMPUTE_STARTUP_BUCKETS: Lazy<[f64; 28]> = Lazy::new(|| {
2127 0 : // Go code uses milliseconds. Variable is called `computeStartupBuckets`
2128 0 : [
2129 0 : 5, 10, 20, 30, 50, 70, 100, 120, 150, 200, 250, 300, 350, 400, 450, 500, 600, 800, 1000,
2130 0 : 1500, 2000, 2500, 3000, 5000, 10000, 20000, 40000, 60000,
2131 0 : ]
2132 0 : .map(|ms| (ms as f64) / 1000.0)
2133 0 : });
2134 :
2135 : pub(crate) struct BasebackupQueryTime {
2136 : ok: Histogram,
2137 : error: Histogram,
2138 : client_error: Histogram,
2139 : }
2140 :
2141 0 : pub(crate) static BASEBACKUP_QUERY_TIME: Lazy<BasebackupQueryTime> = Lazy::new(|| {
2142 0 : let vec = register_histogram_vec!(
2143 0 : "pageserver_basebackup_query_seconds",
2144 0 : "Histogram of basebackup queries durations, by result type",
2145 0 : &["result"],
2146 0 : COMPUTE_STARTUP_BUCKETS.to_vec(),
2147 0 : )
2148 0 : .expect("failed to define a metric");
2149 0 : BasebackupQueryTime {
2150 0 : ok: vec.get_metric_with_label_values(&["ok"]).unwrap(),
2151 0 : error: vec.get_metric_with_label_values(&["error"]).unwrap(),
2152 0 : client_error: vec.get_metric_with_label_values(&["client_error"]).unwrap(),
2153 0 : }
2154 0 : });
2155 :
2156 : pub(crate) struct BasebackupQueryTimeOngoingRecording<'a> {
2157 : parent: &'a BasebackupQueryTime,
2158 : start: std::time::Instant,
2159 : }
2160 :
2161 : impl BasebackupQueryTime {
2162 0 : pub(crate) fn start_recording(&self) -> BasebackupQueryTimeOngoingRecording<'_> {
2163 0 : let start = Instant::now();
2164 0 : BasebackupQueryTimeOngoingRecording {
2165 0 : parent: self,
2166 0 : start,
2167 0 : }
2168 0 : }
2169 : }
2170 :
2171 : impl BasebackupQueryTimeOngoingRecording<'_> {
2172 0 : pub(crate) fn observe<T>(self, res: &Result<T, QueryError>) {
2173 0 : let elapsed = self.start.elapsed().as_secs_f64();
2174 : // If you want to change categorize of a specific error, also change it in `log_query_error`.
2175 0 : let metric = match res {
2176 0 : Ok(_) => &self.parent.ok,
2177 0 : Err(QueryError::Disconnected(ConnectionError::Io(io_error)))
2178 0 : if is_expected_io_error(io_error) =>
2179 0 : {
2180 0 : &self.parent.client_error
2181 : }
2182 0 : Err(_) => &self.parent.error,
2183 : };
2184 0 : metric.observe(elapsed);
2185 0 : }
2186 : }
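// Illustrative usage sketch (`handle_basebackup_request` is a hypothetical stand-in for
// the actual page service handler): start the recording before the work, then feed the
// final `Result` back so the duration lands in the right bucket ("ok", "error", or
// "client_error").
//
//     let recording = BASEBACKUP_QUERY_TIME.start_recording();
//     let res = handle_basebackup_request(/* ... */).await;
//     recording.observe(&res);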
2187 :
2188 0 : pub(crate) static LIVE_CONNECTIONS: Lazy<IntCounterPairVec> = Lazy::new(|| {
2189 0 : register_int_counter_pair_vec!(
2190 0 : "pageserver_live_connections_started",
2191 0 : "Number of network connections that we started handling",
2192 0 : "pageserver_live_connections_finished",
2193 0 : "Number of network connections that we finished handling",
2194 0 : &["pageserver_connection_kind"]
2195 0 : )
2196 0 : .expect("failed to define a metric")
2197 0 : });
2198 :
2199 : #[derive(Clone, Copy, enum_map::Enum, IntoStaticStr)]
2200 : pub(crate) enum ComputeCommandKind {
2201 : PageStreamV3,
2202 : PageStreamV2,
2203 : Basebackup,
2204 : Fullbackup,
2205 : LeaseLsn,
2206 : }
2207 :
2208 : pub(crate) struct ComputeCommandCounters {
2209 : map: EnumMap<ComputeCommandKind, IntCounter>,
2210 : }
2211 :
2212 0 : pub(crate) static COMPUTE_COMMANDS_COUNTERS: Lazy<ComputeCommandCounters> = Lazy::new(|| {
2213 0 : let inner = register_int_counter_vec!(
2214 0 : "pageserver_compute_commands",
2215 0 : "Number of compute -> pageserver commands processed",
2216 0 : &["command"]
2217 0 : )
2218 0 : .expect("failed to define a metric");
2219 0 :
2220 0 : ComputeCommandCounters {
2221 0 : map: EnumMap::from_array(std::array::from_fn(|i| {
2222 0 : let command = ComputeCommandKind::from_usize(i);
2223 0 : let command_str: &'static str = command.into();
2224 0 : inner.with_label_values(&[command_str])
2225 0 : })),
2226 0 : }
2227 0 : });
2228 :
2229 : impl ComputeCommandCounters {
2230 0 : pub(crate) fn for_command(&self, command: ComputeCommandKind) -> &IntCounter {
2231 0 : &self.map[command]
2232 0 : }
2233 : }
2234 :
2235 : // remote storage metrics
2236 :
2237 416 : static REMOTE_TIMELINE_CLIENT_CALLS: Lazy<IntCounterPairVec> = Lazy::new(|| {
2238 416 : register_int_counter_pair_vec!(
2239 416 : "pageserver_remote_timeline_client_calls_started",
2240 416 : "Number of started calls to remote timeline client.",
2241 416 : "pageserver_remote_timeline_client_calls_finished",
2242 416 :         "Number of finished calls to remote timeline client.",
2243 416 : &[
2244 416 : "tenant_id",
2245 416 : "shard_id",
2246 416 : "timeline_id",
2247 416 : "file_kind",
2248 416 : "op_kind"
2249 416 : ],
2250 416 : )
2251 416 : .unwrap()
2252 416 : });
2253 :
2254 : static REMOTE_TIMELINE_CLIENT_BYTES_STARTED_COUNTER: Lazy<IntCounterVec> =
2255 412 : Lazy::new(|| {
2256 412 : register_int_counter_vec!(
2257 412 : "pageserver_remote_timeline_client_bytes_started",
2258 412 : "Incremented by the number of bytes associated with a remote timeline client operation. \
2259 412 : The increment happens when the operation is scheduled.",
2260 412 : &["tenant_id", "shard_id", "timeline_id", "file_kind", "op_kind"],
2261 412 : )
2262 412 : .expect("failed to define a metric")
2263 412 : });
2264 :
2265 412 : static REMOTE_TIMELINE_CLIENT_BYTES_FINISHED_COUNTER: Lazy<IntCounterVec> = Lazy::new(|| {
2266 412 : register_int_counter_vec!(
2267 412 : "pageserver_remote_timeline_client_bytes_finished",
2268 412 : "Incremented by the number of bytes associated with a remote timeline client operation. \
2269 412 : The increment happens when the operation finishes (regardless of success/failure/shutdown).",
2270 412 : &["tenant_id", "shard_id", "timeline_id", "file_kind", "op_kind"],
2271 412 : )
2272 412 : .expect("failed to define a metric")
2273 412 : });
2274 :
2275 : pub(crate) struct TenantManagerMetrics {
2276 : tenant_slots_attached: UIntGauge,
2277 : tenant_slots_secondary: UIntGauge,
2278 : tenant_slots_inprogress: UIntGauge,
2279 : pub(crate) tenant_slot_writes: IntCounter,
2280 : pub(crate) unexpected_errors: IntCounter,
2281 : }
2282 :
2283 : impl TenantManagerMetrics {
2284 : /// Helpers for tracking slots. Note that these do not track the lifetime of TenantSlot objects
2285 : /// exactly: they track the lifetime of the slots _in the tenant map_.
2286 4 : pub(crate) fn slot_inserted(&self, slot: &TenantSlot) {
2287 4 : match slot {
2288 0 : TenantSlot::Attached(_) => {
2289 0 : self.tenant_slots_attached.inc();
2290 0 : }
2291 0 : TenantSlot::Secondary(_) => {
2292 0 : self.tenant_slots_secondary.inc();
2293 0 : }
2294 4 : TenantSlot::InProgress(_) => {
2295 4 : self.tenant_slots_inprogress.inc();
2296 4 : }
2297 : }
2298 4 : }
2299 :
2300 4 : pub(crate) fn slot_removed(&self, slot: &TenantSlot) {
2301 4 : match slot {
2302 4 : TenantSlot::Attached(_) => {
2303 4 : self.tenant_slots_attached.dec();
2304 4 : }
2305 0 : TenantSlot::Secondary(_) => {
2306 0 : self.tenant_slots_secondary.dec();
2307 0 : }
2308 0 : TenantSlot::InProgress(_) => {
2309 0 : self.tenant_slots_inprogress.dec();
2310 0 : }
2311 : }
2312 4 : }
2313 :
2314 : #[cfg(all(debug_assertions, not(test)))]
2315 0 : pub(crate) fn slots_total(&self) -> u64 {
2316 0 : self.tenant_slots_attached.get()
2317 0 : + self.tenant_slots_secondary.get()
2318 0 : + self.tenant_slots_inprogress.get()
2319 0 : }
2320 : }
2321 :
2322 4 : pub(crate) static TENANT_MANAGER: Lazy<TenantManagerMetrics> = Lazy::new(|| {
2323 4 : let tenant_slots = register_uint_gauge_vec!(
2324 4 : "pageserver_tenant_manager_slots",
2325 4 : "How many slots currently exist, including all attached, secondary and in-progress operations",
2326 4 : &["mode"]
2327 4 : )
2328 4 : .expect("failed to define a metric");
2329 4 : TenantManagerMetrics {
2330 4 : tenant_slots_attached: tenant_slots
2331 4 : .get_metric_with_label_values(&["attached"])
2332 4 : .unwrap(),
2333 4 : tenant_slots_secondary: tenant_slots
2334 4 : .get_metric_with_label_values(&["secondary"])
2335 4 : .unwrap(),
2336 4 : tenant_slots_inprogress: tenant_slots
2337 4 : .get_metric_with_label_values(&["inprogress"])
2338 4 : .unwrap(),
2339 4 : tenant_slot_writes: register_int_counter!(
2340 4 : "pageserver_tenant_manager_slot_writes",
2341 4 : "Writes to a tenant slot, including all of create/attach/detach/delete"
2342 4 : )
2343 4 : .expect("failed to define a metric"),
2344 4 : unexpected_errors: register_int_counter!(
2345 4 : "pageserver_tenant_manager_unexpected_errors_total",
2346 4 : "Number of unexpected conditions encountered: nonzero value indicates a non-fatal bug."
2347 4 : )
2348 4 : .expect("failed to define a metric"),
2349 4 : }
2350 4 : });
2351 :
2352 : pub(crate) struct DeletionQueueMetrics {
2353 : pub(crate) keys_submitted: IntCounter,
2354 : pub(crate) keys_dropped: IntCounter,
2355 : pub(crate) keys_executed: IntCounter,
2356 : pub(crate) keys_validated: IntCounter,
2357 : pub(crate) dropped_lsn_updates: IntCounter,
2358 : pub(crate) unexpected_errors: IntCounter,
2359 : pub(crate) remote_errors: IntCounterVec,
2360 : }
2361 67 : pub(crate) static DELETION_QUEUE: Lazy<DeletionQueueMetrics> = Lazy::new(|| {
2362 67 : DeletionQueueMetrics{
2363 67 :
2364 67 : keys_submitted: register_int_counter!(
2365 67 : "pageserver_deletion_queue_submitted_total",
2366 67 : "Number of objects submitted for deletion"
2367 67 : )
2368 67 : .expect("failed to define a metric"),
2369 67 :
2370 67 : keys_dropped: register_int_counter!(
2371 67 : "pageserver_deletion_queue_dropped_total",
2372 67 : "Number of object deletions dropped due to stale generation."
2373 67 : )
2374 67 : .expect("failed to define a metric"),
2375 67 :
2376 67 : keys_executed: register_int_counter!(
2377 67 : "pageserver_deletion_queue_executed_total",
2378 67 : "Number of objects deleted. Only includes objects that we actually deleted, sum with pageserver_deletion_queue_dropped_total for the total number of keys processed to completion"
2379 67 :         "Number of objects deleted. Only includes objects that we actually deleted; sum with pageserver_deletion_queue_dropped_total for the total number of keys processed to completion"
2380 67 : .expect("failed to define a metric"),
2381 67 :
2382 67 : keys_validated: register_int_counter!(
2383 67 : "pageserver_deletion_queue_validated_total",
2384 67 : "Number of keys validated for deletion. Sum with pageserver_deletion_queue_dropped_total for the total number of keys that have passed through the validation stage."
2385 67 : )
2386 67 : .expect("failed to define a metric"),
2387 67 :
2388 67 : dropped_lsn_updates: register_int_counter!(
2389 67 : "pageserver_deletion_queue_dropped_lsn_updates_total",
2390 67 : "Updates to remote_consistent_lsn dropped due to stale generation number."
2391 67 : )
2392 67 : .expect("failed to define a metric"),
2393 67 : unexpected_errors: register_int_counter!(
2394 67 : "pageserver_deletion_queue_unexpected_errors_total",
2395 67 : "Number of unexpected condiions that may stall the queue: any value above zero is unexpected."
2396 67 :         "Number of unexpected conditions that may stall the queue: any value above zero is unexpected."
2397 67 : .expect("failed to define a metric"),
2398 67 : remote_errors: register_int_counter_vec!(
2399 67 : "pageserver_deletion_queue_remote_errors_total",
2400 67 : "Retryable remote I/O errors while executing deletions, for example 503 responses to DeleteObjects",
2401 67 : &["op_kind"],
2402 67 : )
2403 67 : .expect("failed to define a metric")
2404 67 : }
2405 67 : });
2406 :
2407 : pub(crate) struct SecondaryModeMetrics {
2408 : pub(crate) upload_heatmap: IntCounter,
2409 : pub(crate) upload_heatmap_errors: IntCounter,
2410 : pub(crate) upload_heatmap_duration: Histogram,
2411 : pub(crate) download_heatmap: IntCounter,
2412 : pub(crate) download_layer: IntCounter,
2413 : }
2414 0 : pub(crate) static SECONDARY_MODE: Lazy<SecondaryModeMetrics> = Lazy::new(|| {
2415 0 : SecondaryModeMetrics {
2416 0 : upload_heatmap: register_int_counter!(
2417 0 : "pageserver_secondary_upload_heatmap",
2418 0 : "Number of heatmaps written to remote storage by attached tenants"
2419 0 : )
2420 0 : .expect("failed to define a metric"),
2421 0 : upload_heatmap_errors: register_int_counter!(
2422 0 : "pageserver_secondary_upload_heatmap_errors",
2423 0 : "Failures writing heatmap to remote storage"
2424 0 : )
2425 0 : .expect("failed to define a metric"),
2426 0 : upload_heatmap_duration: register_histogram!(
2427 0 : "pageserver_secondary_upload_heatmap_duration",
2428 0 : "Time to build and upload a heatmap, including any waiting inside the remote storage client"
2429 0 : )
2430 0 : .expect("failed to define a metric"),
2431 0 : download_heatmap: register_int_counter!(
2432 0 : "pageserver_secondary_download_heatmap",
2433 0 : "Number of downloads of heatmaps by secondary mode locations, including when it hasn't changed"
2434 0 : )
2435 0 : .expect("failed to define a metric"),
2436 0 : download_layer: register_int_counter!(
2437 0 : "pageserver_secondary_download_layer",
2438 0 : "Number of downloads of layers by secondary mode locations"
2439 0 : )
2440 0 : .expect("failed to define a metric"),
2441 0 : }
2442 0 : });
2443 :
2444 0 : pub(crate) static SECONDARY_RESIDENT_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
2445 0 : register_uint_gauge_vec!(
2446 0 : "pageserver_secondary_resident_physical_size",
2447 0 : "The size of the layer files present in the pageserver's filesystem, for secondary locations.",
2448 0 : &["tenant_id", "shard_id"]
2449 0 : )
2450 0 : .expect("failed to define a metric")
2451 0 : });
2452 :
2453 0 : pub(crate) static NODE_UTILIZATION_SCORE: Lazy<UIntGauge> = Lazy::new(|| {
2454 0 : register_uint_gauge!(
2455 0 : "pageserver_utilization_score",
2456 0 : "The utilization score we report to the storage controller for scheduling, where 0 is empty, 1000000 is full, and anything above is considered overloaded",
2457 0 : )
2458 0 : .expect("failed to define a metric")
2459 0 : });
2460 :
2461 0 : pub(crate) static SECONDARY_HEATMAP_TOTAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
2462 0 : register_uint_gauge_vec!(
2463 0 : "pageserver_secondary_heatmap_total_size",
2464 0 : "The total size in bytes of all layers in the most recently downloaded heatmap.",
2465 0 : &["tenant_id", "shard_id"]
2466 0 : )
2467 0 : .expect("failed to define a metric")
2468 0 : });
2469 :
2470 : #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
2471 : pub enum RemoteOpKind {
2472 : Upload,
2473 : Download,
2474 : Delete,
2475 : }
2476 : impl RemoteOpKind {
2477 30835 : pub fn as_str(&self) -> &'static str {
2478 30835 : match self {
2479 29033 : Self::Upload => "upload",
2480 136 : Self::Download => "download",
2481 1666 : Self::Delete => "delete",
2482 : }
2483 30835 : }
2484 : }
2485 :
2486 : #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
2487 : pub enum RemoteOpFileKind {
2488 : Layer,
2489 : Index,
2490 : }
2491 : impl RemoteOpFileKind {
2492 30835 : pub fn as_str(&self) -> &'static str {
2493 30835 : match self {
2494 21696 : Self::Layer => "layer",
2495 9139 : Self::Index => "index",
2496 : }
2497 30835 : }
2498 : }
2499 :
2500 410 : pub(crate) static REMOTE_TIMELINE_CLIENT_COMPLETION_LATENCY: Lazy<HistogramVec> = Lazy::new(|| {
2501 410 : register_histogram_vec!(
2502 410 : "pageserver_remote_timeline_client_seconds_global",
2503 410 : "Time spent on remote timeline client operations. \
2504 410 : Grouped by task_kind, file_kind, operation_kind and status. \
2505 410 : The task_kind is \
2506 410 : - for layer downloads, populated from RequestContext (primary objective of having the label) \
2507 410 : - for index downloads, set to 'unknown' \
2508 410 : - for any upload operation, set to 'RemoteUploadTask' \
2509 410 : This keeps dimensionality at bay. \
2510 410 : Does not account for time spent waiting in remote timeline client's queues.",
2511 410 : &["task_kind", "file_kind", "op_kind", "status"]
2512 410 : )
2513 410 : .expect("failed to define a metric")
2514 410 : });
2515 :
2516 0 : pub(crate) static TENANT_TASK_EVENTS: Lazy<IntCounterVec> = Lazy::new(|| {
2517 0 : register_int_counter_vec!(
2518 0 : "pageserver_tenant_task_events",
2519 0 : "Number of task start/stop/fail events.",
2520 0 : &["event"],
2521 0 : )
2522 0 : .expect("Failed to register tenant_task_events metric")
2523 0 : });
2524 :
2525 : pub struct BackgroundLoopSemaphoreMetrics {
2526 : counters: EnumMap<BackgroundLoopKind, IntCounterPair>,
2527 : durations: EnumMap<BackgroundLoopKind, Histogram>,
2528 : waiting_tasks: EnumMap<BackgroundLoopKind, IntGauge>,
2529 : running_tasks: EnumMap<BackgroundLoopKind, IntGauge>,
2530 : }
2531 :
2532 : pub(crate) static BACKGROUND_LOOP_SEMAPHORE: Lazy<BackgroundLoopSemaphoreMetrics> =
2533 40 : Lazy::new(|| {
2534 40 : let counters = register_int_counter_pair_vec!(
2535 40 : "pageserver_background_loop_semaphore_wait_start_count",
2536 40 : "Counter for background loop concurrency-limiting semaphore acquire calls started",
2537 40 : "pageserver_background_loop_semaphore_wait_finish_count",
2538 40 : "Counter for background loop concurrency-limiting semaphore acquire calls finished",
2539 40 : &["task"],
2540 40 : )
2541 40 : .unwrap();
2542 40 :
2543 40 : let durations = register_histogram_vec!(
2544 40 : "pageserver_background_loop_semaphore_wait_seconds",
2545 40 : "Seconds spent waiting on background loop semaphore acquisition",
2546 40 : &["task"],
2547 40 : vec![0.01, 1.0, 5.0, 10.0, 30.0, 60.0, 180.0, 300.0, 600.0],
2548 40 : )
2549 40 : .unwrap();
2550 40 :
2551 40 : let waiting_tasks = register_int_gauge_vec!(
2552 40 : "pageserver_background_loop_semaphore_waiting_tasks",
2553 40 : "Number of background loop tasks waiting for semaphore",
2554 40 : &["task"],
2555 40 : )
2556 40 : .unwrap();
2557 40 :
2558 40 : let running_tasks = register_int_gauge_vec!(
2559 40 : "pageserver_background_loop_semaphore_running_tasks",
2560 40 : "Number of background loop tasks running concurrently",
2561 40 : &["task"],
2562 40 : )
2563 40 : .unwrap();
2564 40 :
2565 40 : BackgroundLoopSemaphoreMetrics {
2566 400 : counters: EnumMap::from_array(std::array::from_fn(|i| {
2567 400 : let kind = BackgroundLoopKind::from_usize(i);
2568 400 : counters.with_label_values(&[kind.into()])
2569 400 : })),
2570 400 : durations: EnumMap::from_array(std::array::from_fn(|i| {
2571 400 : let kind = BackgroundLoopKind::from_usize(i);
2572 400 : durations.with_label_values(&[kind.into()])
2573 400 : })),
2574 400 : waiting_tasks: EnumMap::from_array(std::array::from_fn(|i| {
2575 400 : let kind = BackgroundLoopKind::from_usize(i);
2576 400 : waiting_tasks.with_label_values(&[kind.into()])
2577 400 : })),
2578 400 : running_tasks: EnumMap::from_array(std::array::from_fn(|i| {
2579 400 : let kind = BackgroundLoopKind::from_usize(i);
2580 400 : running_tasks.with_label_values(&[kind.into()])
2581 400 : })),
2582 40 : }
2583 40 : });
2584 :
2585 : impl BackgroundLoopSemaphoreMetrics {
2586 : /// Starts recording semaphore metrics. Call `acquired()` on the returned recorder when the
2587 : /// semaphore is acquired, and drop it when the task completes or is cancelled.
2588 728 : pub(crate) fn record(
2589 728 : &self,
2590 728 : task: BackgroundLoopKind,
2591 728 : ) -> BackgroundLoopSemaphoreMetricsRecorder {
2592 728 : BackgroundLoopSemaphoreMetricsRecorder::start(self, task)
2593 728 : }
2594 : }
2595 :
2596 : /// Records metrics for a background task.
2597 : pub struct BackgroundLoopSemaphoreMetricsRecorder<'a> {
2598 : metrics: &'a BackgroundLoopSemaphoreMetrics,
2599 : task: BackgroundLoopKind,
2600 : start: Instant,
2601 : wait_counter_guard: Option<metrics::IntCounterPairGuard>,
2602 : }
2603 :
2604 : impl<'a> BackgroundLoopSemaphoreMetricsRecorder<'a> {
2605 : /// Starts recording semaphore metrics, by recording wait time and incrementing
2606 : /// `wait_start_count` and `waiting_tasks`.
2607 728 : fn start(metrics: &'a BackgroundLoopSemaphoreMetrics, task: BackgroundLoopKind) -> Self {
2608 728 : metrics.waiting_tasks[task].inc();
2609 728 : Self {
2610 728 : metrics,
2611 728 : task,
2612 728 : start: Instant::now(),
2613 728 : wait_counter_guard: Some(metrics.counters[task].guard()),
2614 728 : }
2615 728 : }
2616 :
2617 : /// Signals that the semaphore has been acquired, and updates relevant metrics.
2618 728 : pub fn acquired(&mut self) -> Duration {
2619 728 : let waited = self.start.elapsed();
2620 728 : self.wait_counter_guard.take().expect("already acquired");
2621 728 : self.metrics.durations[self.task].observe(waited.as_secs_f64());
2622 728 : self.metrics.waiting_tasks[self.task].dec();
2623 728 : self.metrics.running_tasks[self.task].inc();
2624 728 : waited
2625 728 : }
2626 : }
2627 :
2628 : impl Drop for BackgroundLoopSemaphoreMetricsRecorder<'_> {
2629 : /// The task either completed or was cancelled.
2630 728 : fn drop(&mut self) {
2631 728 : if self.wait_counter_guard.take().is_some() {
2632 0 : // Waiting.
2633 0 : self.metrics.durations[self.task].observe(self.start.elapsed().as_secs_f64());
2634 0 : self.metrics.waiting_tasks[self.task].dec();
2635 728 : } else {
2636 728 : // Running.
2637 728 : self.metrics.running_tasks[self.task].dec();
2638 728 : }
2639 728 : }
2640 : }
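// Illustrative sketch of the intended call pattern (`semaphore` is a hypothetical
// tokio::sync::Semaphore guarding background loop concurrency, and `kind` is the loop's
// BackgroundLoopKind): waiting time is recorded until `acquired()` is called, and the
// Drop impl above accounts for the task finishing or being cancelled.
//
//     let mut recorder = BACKGROUND_LOOP_SEMAPHORE.record(kind);
//     let _permit = semaphore.acquire().await;
//     let waited: Duration = recorder.acquired();
//     // ... run the background loop iteration; dropping `recorder` decrements running_tasks ...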
2641 :
2642 0 : pub(crate) static BACKGROUND_LOOP_PERIOD_OVERRUN_COUNT: Lazy<IntCounterVec> = Lazy::new(|| {
2643 0 : register_int_counter_vec!(
2644 0 : "pageserver_background_loop_period_overrun_count",
2645 0 : "Incremented whenever warn_when_period_overrun() logs a warning.",
2646 0 : &["task", "period"],
2647 0 : )
2648 0 : .expect("failed to define a metric")
2649 0 : });
2650 :
2651 : // walreceiver metrics
2652 :
2653 0 : pub(crate) static WALRECEIVER_STARTED_CONNECTIONS: Lazy<IntCounter> = Lazy::new(|| {
2654 0 : register_int_counter!(
2655 0 : "pageserver_walreceiver_started_connections_total",
2656 0 : "Number of started walreceiver connections"
2657 0 : )
2658 0 : .expect("failed to define a metric")
2659 0 : });
2660 :
2661 0 : pub(crate) static WALRECEIVER_ACTIVE_MANAGERS: Lazy<IntGauge> = Lazy::new(|| {
2662 0 : register_int_gauge!(
2663 0 : "pageserver_walreceiver_active_managers",
2664 0 : "Number of active walreceiver managers"
2665 0 : )
2666 0 : .expect("failed to define a metric")
2667 0 : });
2668 :
2669 0 : pub(crate) static WALRECEIVER_SWITCHES: Lazy<IntCounterVec> = Lazy::new(|| {
2670 0 : register_int_counter_vec!(
2671 0 : "pageserver_walreceiver_switches_total",
2672 0 : "Number of walreceiver manager change_connection calls",
2673 0 : &["reason"]
2674 0 : )
2675 0 : .expect("failed to define a metric")
2676 0 : });
2677 :
2678 0 : pub(crate) static WALRECEIVER_BROKER_UPDATES: Lazy<IntCounter> = Lazy::new(|| {
2679 0 : register_int_counter!(
2680 0 : "pageserver_walreceiver_broker_updates_total",
2681 0 : "Number of received broker updates in walreceiver"
2682 0 : )
2683 0 : .expect("failed to define a metric")
2684 0 : });
2685 :
2686 4 : pub(crate) static WALRECEIVER_CANDIDATES_EVENTS: Lazy<IntCounterVec> = Lazy::new(|| {
2687 4 : register_int_counter_vec!(
2688 4 : "pageserver_walreceiver_candidates_events_total",
2689 4 : "Number of walreceiver candidate events",
2690 4 : &["event"]
2691 4 : )
2692 4 : .expect("failed to define a metric")
2693 4 : });
2694 :
2695 : pub(crate) static WALRECEIVER_CANDIDATES_ADDED: Lazy<IntCounter> =
2696 0 : Lazy::new(|| WALRECEIVER_CANDIDATES_EVENTS.with_label_values(&["add"]));
2697 :
2698 : pub(crate) static WALRECEIVER_CANDIDATES_REMOVED: Lazy<IntCounter> =
2699 4 : Lazy::new(|| WALRECEIVER_CANDIDATES_EVENTS.with_label_values(&["remove"]));
2700 :
2701 : // Metrics collected on WAL redo operations
2702 : //
2703 : // We collect the time spent in actual WAL redo ('redo'), and time waiting
2704 : // for access to the postgres process ('wait') since there is only one for
2705 : // each tenant.
2706 :
2707 : /// Time buckets are small because we want to be able to measure the
2708 : /// smallest redo processing times. These buckets allow us to measure down
2709 : /// to 5us, which equates to 200'000 pages/sec, which equates to 1.6GB/sec.
2710 : /// This is much better than the previous 5ms aka 200 pages/sec aka 1.6MB/sec.
2711 : ///
2712 : /// Values up to 1s are recorded because metrics show that we have redo
2713 : /// durations and lock times larger than 0.250s.
2714 : macro_rules! redo_histogram_time_buckets {
2715 : () => {
2716 : vec![
2717 : 0.000_005, 0.000_010, 0.000_025, 0.000_050, 0.000_100, 0.000_250, 0.000_500, 0.001_000,
2718 : 0.002_500, 0.005_000, 0.010_000, 0.025_000, 0.050_000, 0.100_000, 0.250_000, 0.500_000,
2719 : 1.000_000,
2720 : ]
2721 : };
2722 : }
2723 :
2724 : /// While we're at it, also measure the number of records replayed in each
2725 : /// operation. We have a global 'total replayed' counter, but that's not
2726 : /// as useful as 'what is the skew for how many records we replay in one
2727 : /// operation'.
2728 : macro_rules! redo_histogram_count_buckets {
2729 : () => {
2730 : vec![0.0, 1.0, 2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0]
2731 : };
2732 : }
2733 :
2734 : macro_rules! redo_bytes_histogram_count_buckets {
2735 : () => {
2736 : // powers of (2^.5), from 2^4.5 to 2^15 (22 buckets)
2737 : // rounded up to the next multiple of 8 to capture any MAXALIGNed record of that size, too.
2738 : vec![
2739 : 24.0, 32.0, 48.0, 64.0, 96.0, 128.0, 184.0, 256.0, 368.0, 512.0, 728.0, 1024.0, 1456.0,
2740 : 2048.0, 2904.0, 4096.0, 5800.0, 8192.0, 11592.0, 16384.0, 23176.0, 32768.0,
2741 : ]
2742 : };
2743 : }
2744 :
2745 : pub(crate) struct WalIngestMetrics {
2746 : pub(crate) bytes_received: IntCounter,
2747 : pub(crate) records_received: IntCounter,
2748 : pub(crate) records_observed: IntCounter,
2749 : pub(crate) records_committed: IntCounter,
2750 : pub(crate) records_filtered: IntCounter,
2751 : pub(crate) values_committed_metadata_images: IntCounter,
2752 : pub(crate) values_committed_metadata_deltas: IntCounter,
2753 : pub(crate) values_committed_data_images: IntCounter,
2754 : pub(crate) values_committed_data_deltas: IntCounter,
2755 : pub(crate) gap_blocks_zeroed_on_rel_extend: IntCounter,
2756 : }
2757 :
2758 : impl WalIngestMetrics {
2759 0 : pub(crate) fn inc_values_committed(&self, stats: &DatadirModificationStats) {
2760 0 : if stats.metadata_images > 0 {
2761 0 : self.values_committed_metadata_images
2762 0 : .inc_by(stats.metadata_images);
2763 0 : }
2764 0 : if stats.metadata_deltas > 0 {
2765 0 : self.values_committed_metadata_deltas
2766 0 : .inc_by(stats.metadata_deltas);
2767 0 : }
2768 0 : if stats.data_images > 0 {
2769 0 : self.values_committed_data_images.inc_by(stats.data_images);
2770 0 : }
2771 0 : if stats.data_deltas > 0 {
2772 0 : self.values_committed_data_deltas.inc_by(stats.data_deltas);
2773 0 : }
2774 0 : }
2775 : }
2776 :
2777 20 : pub(crate) static WAL_INGEST: Lazy<WalIngestMetrics> = Lazy::new(|| {
2778 20 : let values_committed = register_int_counter_vec!(
2779 20 : "pageserver_wal_ingest_values_committed",
2780 20 : "Number of values committed to pageserver storage from WAL records",
2781 20 : &["class", "kind"],
2782 20 : )
2783 20 : .expect("failed to define a metric");
2784 20 :
2785 20 : WalIngestMetrics {
2786 20 : bytes_received: register_int_counter!(
2787 20 : "pageserver_wal_ingest_bytes_received",
2788 20 : "Bytes of WAL ingested from safekeepers",
2789 20 : )
2790 20 : .unwrap(),
2791 20 : records_received: register_int_counter!(
2792 20 : "pageserver_wal_ingest_records_received",
2793 20 : "Number of WAL records received from safekeepers"
2794 20 : )
2795 20 : .expect("failed to define a metric"),
2796 20 : records_observed: register_int_counter!(
2797 20 : "pageserver_wal_ingest_records_observed",
2798 20 : "Number of WAL records observed from safekeepers. These are metadata only records for shard 0."
2799 20 : )
2800 20 : .expect("failed to define a metric"),
2801 20 : records_committed: register_int_counter!(
2802 20 : "pageserver_wal_ingest_records_committed",
2803 20 : "Number of WAL records which resulted in writes to pageserver storage"
2804 20 : )
2805 20 : .expect("failed to define a metric"),
2806 20 : records_filtered: register_int_counter!(
2807 20 : "pageserver_wal_ingest_records_filtered",
2808 20 : "Number of WAL records filtered out due to sharding"
2809 20 : )
2810 20 : .expect("failed to define a metric"),
2811 20 : values_committed_metadata_images: values_committed.with_label_values(&["metadata", "image"]),
2812 20 : values_committed_metadata_deltas: values_committed.with_label_values(&["metadata", "delta"]),
2813 20 : values_committed_data_images: values_committed.with_label_values(&["data", "image"]),
2814 20 : values_committed_data_deltas: values_committed.with_label_values(&["data", "delta"]),
2815 20 : gap_blocks_zeroed_on_rel_extend: register_int_counter!(
2816 20 : "pageserver_gap_blocks_zeroed_on_rel_extend",
2817 20 : "Total number of zero gap blocks written on relation extends"
2818 20 : )
2819 20 : .expect("failed to define a metric"),
2820 20 : }
2821 20 : });
2822 :
2823 424 : pub(crate) static PAGESERVER_TIMELINE_WAL_RECORDS_RECEIVED: Lazy<IntCounterVec> = Lazy::new(|| {
2824 424 : register_int_counter_vec!(
2825 424 : "pageserver_timeline_wal_records_received",
2826 424 : "Number of WAL records received per shard",
2827 424 : &["tenant_id", "shard_id", "timeline_id"]
2828 424 : )
2829 424 : .expect("failed to define a metric")
2830 424 : });
2831 :
2832 12 : pub(crate) static WAL_REDO_TIME: Lazy<Histogram> = Lazy::new(|| {
2833 12 : register_histogram!(
2834 12 : "pageserver_wal_redo_seconds",
2835 12 : "Time spent on WAL redo",
2836 12 : redo_histogram_time_buckets!()
2837 12 : )
2838 12 : .expect("failed to define a metric")
2839 12 : });
2840 :
2841 12 : pub(crate) static WAL_REDO_RECORDS_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
2842 12 : register_histogram!(
2843 12 : "pageserver_wal_redo_records_histogram",
2844 12 : "Histogram of number of records replayed per redo in the Postgres WAL redo process",
2845 12 : redo_histogram_count_buckets!(),
2846 12 : )
2847 12 : .expect("failed to define a metric")
2848 12 : });
2849 :
2850 12 : pub(crate) static WAL_REDO_BYTES_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
2851 12 : register_histogram!(
2852 12 : "pageserver_wal_redo_bytes_histogram",
2853 12 :         "Histogram of number of bytes of WAL records replayed per redo sent to Postgres",
2854 12 : redo_bytes_histogram_count_buckets!(),
2855 12 : )
2856 12 : .expect("failed to define a metric")
2857 12 : });
2858 :
2859 : // FIXME: isn't this already included by WAL_REDO_RECORDS_HISTOGRAM which has _count?
2860 12 : pub(crate) static WAL_REDO_RECORD_COUNTER: Lazy<IntCounter> = Lazy::new(|| {
2861 12 : register_int_counter!(
2862 12 : "pageserver_replayed_wal_records_total",
2863 12 : "Number of WAL records replayed in WAL redo process"
2864 12 : )
2865 12 : .unwrap()
2866 12 : });
2867 :
2868 : #[rustfmt::skip]
2869 16 : pub(crate) static WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
2870 16 : register_histogram!(
2871 16 : "pageserver_wal_redo_process_launch_duration",
2872 16 : "Histogram of the duration of successful WalRedoProcess::launch calls",
2873 16 : vec![
2874 16 : 0.0002, 0.0004, 0.0006, 0.0008, 0.0010,
2875 16 : 0.0020, 0.0040, 0.0060, 0.0080, 0.0100,
2876 16 : 0.0200, 0.0400, 0.0600, 0.0800, 0.1000,
2877 16 : 0.2000, 0.4000, 0.6000, 0.8000, 1.0000,
2878 16 : 1.5000, 2.0000, 2.5000, 3.0000, 4.0000, 10.0000
2879 16 : ],
2880 16 : )
2881 16 : .expect("failed to define a metric")
2882 16 : });
2883 :
2884 : pub(crate) struct WalRedoProcessCounters {
2885 : pub(crate) started: IntCounter,
2886 : pub(crate) killed_by_cause: EnumMap<WalRedoKillCause, IntCounter>,
2887 : pub(crate) active_stderr_logger_tasks_started: IntCounter,
2888 : pub(crate) active_stderr_logger_tasks_finished: IntCounter,
2889 : }
2890 :
2891 : #[derive(Debug, enum_map::Enum, strum_macros::IntoStaticStr)]
2892 : pub(crate) enum WalRedoKillCause {
2893 : WalRedoProcessDrop,
2894 : NoLeakChildDrop,
2895 : Startup,
2896 : }
2897 :
2898 : impl Default for WalRedoProcessCounters {
2899 16 : fn default() -> Self {
2900 16 : let started = register_int_counter!(
2901 16 : "pageserver_wal_redo_process_started_total",
2902 16 : "Number of WAL redo processes started",
2903 16 : )
2904 16 : .unwrap();
2905 16 :
2906 16 : let killed = register_int_counter_vec!(
2907 16 : "pageserver_wal_redo_process_stopped_total",
2908 16 : "Number of WAL redo processes stopped",
2909 16 : &["cause"],
2910 16 : )
2911 16 : .unwrap();
2912 16 :
2913 16 : let active_stderr_logger_tasks_started = register_int_counter!(
2914 16 : "pageserver_walredo_stderr_logger_tasks_started_total",
2915 16 : "Number of active walredo stderr logger tasks that have started",
2916 16 : )
2917 16 : .unwrap();
2918 16 :
2919 16 : let active_stderr_logger_tasks_finished = register_int_counter!(
2920 16 : "pageserver_walredo_stderr_logger_tasks_finished_total",
2921 16 : "Number of active walredo stderr logger tasks that have finished",
2922 16 : )
2923 16 : .unwrap();
2924 16 :
2925 16 : Self {
2926 16 : started,
2927 48 : killed_by_cause: EnumMap::from_array(std::array::from_fn(|i| {
2928 48 : let cause = WalRedoKillCause::from_usize(i);
2929 48 : let cause_str: &'static str = cause.into();
2930 48 : killed.with_label_values(&[cause_str])
2931 48 : })),
2932 16 : active_stderr_logger_tasks_started,
2933 16 : active_stderr_logger_tasks_finished,
2934 16 : }
2935 16 : }
2936 : }
2937 :
2938 : pub(crate) static WAL_REDO_PROCESS_COUNTERS: Lazy<WalRedoProcessCounters> =
2939 : Lazy::new(WalRedoProcessCounters::default);
2940 :
2941 : /// Similar to `prometheus::HistogramTimer` but does not record on drop.
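: ///
: /// # Usage sketch
: ///
: /// Illustrative only (not a doc-test); the call sites are assumed, but the timer API
: /// is the one defined below.
: ///
: /// ```ignore
: /// let timer = metrics.flush_time_histo.start_timer();
: /// do_the_flush()?;                        // an early `?` return records nothing
: /// let elapsed = timer.stop_and_record();  // records timeline sum, count and global histogram
: ///
: /// // To record even on an early return, convert the timer first:
: /// let _timer = metrics.compact_time_histo.start_timer().record_on_drop();
: /// ```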
2942 : pub(crate) struct StorageTimeMetricsTimer {
2943 : metrics: StorageTimeMetrics,
2944 : start: Instant,
2945 : }
2946 :
2947 : impl StorageTimeMetricsTimer {
2948 4344 : fn new(metrics: StorageTimeMetrics) -> Self {
2949 4344 : Self {
2950 4344 : metrics,
2951 4344 : start: Instant::now(),
2952 4344 : }
2953 4344 : }
2954 :
2955 : /// Returns the elapsed duration of the timer.
2956 4344 : pub fn elapsed(&self) -> Duration {
2957 4344 : self.start.elapsed()
2958 4344 : }
2959 :
2960 : /// Record the time from creation to now and return it.
2961 4344 : pub fn stop_and_record(self) -> Duration {
2962 4344 : let duration = self.elapsed();
2963 4344 : let seconds = duration.as_secs_f64();
2964 4344 : self.metrics.timeline_sum.inc_by(seconds);
2965 4344 : self.metrics.timeline_count.inc();
2966 4344 : self.metrics.global_histogram.observe(seconds);
2967 4344 : duration
2968 4344 : }
2969 :
2970 : /// Turns this timer into one that always records on drop -- usually this means recording
2971 : /// even if an early `?` return path is taken in a function.
2972 40 : pub(crate) fn record_on_drop(self) -> AlwaysRecordingStorageTimeMetricsTimer {
2973 40 : AlwaysRecordingStorageTimeMetricsTimer(Some(self))
2974 40 : }
2975 : }
2976 :
2977 : pub(crate) struct AlwaysRecordingStorageTimeMetricsTimer(Option<StorageTimeMetricsTimer>);
2978 :
2979 : impl Drop for AlwaysRecordingStorageTimeMetricsTimer {
2980 40 : fn drop(&mut self) {
2981 40 : if let Some(inner) = self.0.take() {
2982 40 : inner.stop_and_record();
2983 40 : }
2984 40 : }
2985 : }
2986 :
2987 : impl AlwaysRecordingStorageTimeMetricsTimer {
2988 : /// Returns the elapsed duration of the timer.
2989 0 : pub fn elapsed(&self) -> Duration {
2990 0 : self.0.as_ref().expect("not dropped yet").elapsed()
2991 0 : }
2992 : }
2993 :
2994 : /// Timing facilities for a globally histogrammed metric, supplemented by per-tenant and
2995 : /// per-timeline total sum and count.
2996 : #[derive(Clone, Debug)]
2997 : pub(crate) struct StorageTimeMetrics {
2998 : /// Sum of f64 seconds, per operation, tenant_id and timeline_id
2999 : timeline_sum: Counter,
3000 : /// Number of operations, per operation, tenant_id and timeline_id
3001 : timeline_count: IntCounter,
3002 : /// Global histogram having only the "operation" label.
3003 : global_histogram: Histogram,
3004 : }
3005 :
3006 : impl StorageTimeMetrics {
3007 8352 : pub fn new(
3008 8352 : operation: StorageTimeOperation,
3009 8352 : tenant_id: &str,
3010 8352 : shard_id: &str,
3011 8352 : timeline_id: &str,
3012 8352 : ) -> Self {
3013 8352 : let operation: &'static str = operation.into();
3014 8352 :
3015 8352 : let timeline_sum = STORAGE_TIME_SUM_PER_TIMELINE
3016 8352 : .get_metric_with_label_values(&[operation, tenant_id, shard_id, timeline_id])
3017 8352 : .unwrap();
3018 8352 : let timeline_count = STORAGE_TIME_COUNT_PER_TIMELINE
3019 8352 : .get_metric_with_label_values(&[operation, tenant_id, shard_id, timeline_id])
3020 8352 : .unwrap();
3021 8352 : let global_histogram = STORAGE_TIME_GLOBAL
3022 8352 : .get_metric_with_label_values(&[operation])
3023 8352 : .unwrap();
3024 8352 :
3025 8352 : StorageTimeMetrics {
3026 8352 : timeline_sum,
3027 8352 : timeline_count,
3028 8352 : global_histogram,
3029 8352 : }
3030 8352 : }
3031 :
3032 : /// Starts timing a new operation.
3033 : ///
3034 : /// Note: unlike `prometheus::HistogramTimer` the returned timer does not record on drop.
3035 4344 : pub fn start_timer(&self) -> StorageTimeMetricsTimer {
3036 4344 : StorageTimeMetricsTimer::new(self.clone())
3037 4344 : }
3038 : }
3039 :
3040 : pub(crate) struct TimelineMetrics {
3041 : tenant_id: String,
3042 : shard_id: String,
3043 : timeline_id: String,
3044 : pub flush_time_histo: StorageTimeMetrics,
3045 : pub flush_delay_histo: StorageTimeMetrics,
3046 : pub compact_time_histo: StorageTimeMetrics,
3047 : pub create_images_time_histo: StorageTimeMetrics,
3048 : pub logical_size_histo: StorageTimeMetrics,
3049 : pub imitate_logical_size_histo: StorageTimeMetrics,
3050 : pub load_layer_map_histo: StorageTimeMetrics,
3051 : pub garbage_collect_histo: StorageTimeMetrics,
3052 : pub find_gc_cutoffs_histo: StorageTimeMetrics,
3053 : pub last_record_lsn_gauge: IntGauge,
3054 : pub disk_consistent_lsn_gauge: IntGauge,
3055 : pub pitr_history_size: UIntGauge,
3056 : pub archival_size: UIntGauge,
3057 : pub layers_per_read: Histogram,
3058 : pub standby_horizon_gauge: IntGauge,
3059 : pub resident_physical_size_gauge: UIntGauge,
3060 : pub visible_physical_size_gauge: UIntGauge,
3061 : /// copy of LayeredTimeline.current_logical_size
3062 : pub current_logical_size_gauge: UIntGauge,
3063 : pub aux_file_size_gauge: IntGauge,
3064 : pub directory_entries_count_gauge: Lazy<UIntGauge, Box<dyn Send + Fn() -> UIntGauge>>,
3065 : pub evictions: IntCounter,
3066 : pub evictions_with_low_residence_duration: std::sync::RwLock<EvictionsWithLowResidenceDuration>,
3067 : /// Number of valid LSN leases.
3068 : pub valid_lsn_lease_count_gauge: UIntGauge,
3069 : pub wal_records_received: IntCounter,
3070 : pub storage_io_size: StorageIoSizeMetrics,
3071 : pub wait_lsn_in_progress_micros: GlobalAndPerTenantIntCounter,
3072 : pub wait_lsn_start_finish_counterpair: IntCounterPair,
3073 : pub wait_ondemand_download_time: wait_ondemand_download_time::WaitOndemandDownloadTimeSum,
3074 : shutdown: std::sync::atomic::AtomicBool,
3075 : }
3076 :
3077 : impl TimelineMetrics {
3078 928 : pub fn new(
3079 928 : tenant_shard_id: &TenantShardId,
3080 928 : timeline_id_raw: &TimelineId,
3081 928 : evictions_with_low_residence_duration_builder: EvictionsWithLowResidenceDurationBuilder,
3082 928 : ) -> Self {
3083 928 : let tenant_id = tenant_shard_id.tenant_id.to_string();
3084 928 : let shard_id = format!("{}", tenant_shard_id.shard_slug());
3085 928 : let timeline_id = timeline_id_raw.to_string();
3086 928 : let flush_time_histo = StorageTimeMetrics::new(
3087 928 : StorageTimeOperation::LayerFlush,
3088 928 : &tenant_id,
3089 928 : &shard_id,
3090 928 : &timeline_id,
3091 928 : );
3092 928 : let flush_delay_histo = StorageTimeMetrics::new(
3093 928 : StorageTimeOperation::LayerFlushDelay,
3094 928 : &tenant_id,
3095 928 : &shard_id,
3096 928 : &timeline_id,
3097 928 : );
3098 928 : let compact_time_histo = StorageTimeMetrics::new(
3099 928 : StorageTimeOperation::Compact,
3100 928 : &tenant_id,
3101 928 : &shard_id,
3102 928 : &timeline_id,
3103 928 : );
3104 928 : let create_images_time_histo = StorageTimeMetrics::new(
3105 928 : StorageTimeOperation::CreateImages,
3106 928 : &tenant_id,
3107 928 : &shard_id,
3108 928 : &timeline_id,
3109 928 : );
3110 928 : let logical_size_histo = StorageTimeMetrics::new(
3111 928 : StorageTimeOperation::LogicalSize,
3112 928 : &tenant_id,
3113 928 : &shard_id,
3114 928 : &timeline_id,
3115 928 : );
3116 928 : let imitate_logical_size_histo = StorageTimeMetrics::new(
3117 928 : StorageTimeOperation::ImitateLogicalSize,
3118 928 : &tenant_id,
3119 928 : &shard_id,
3120 928 : &timeline_id,
3121 928 : );
3122 928 : let load_layer_map_histo = StorageTimeMetrics::new(
3123 928 : StorageTimeOperation::LoadLayerMap,
3124 928 : &tenant_id,
3125 928 : &shard_id,
3126 928 : &timeline_id,
3127 928 : );
3128 928 : let garbage_collect_histo = StorageTimeMetrics::new(
3129 928 : StorageTimeOperation::Gc,
3130 928 : &tenant_id,
3131 928 : &shard_id,
3132 928 : &timeline_id,
3133 928 : );
3134 928 : let find_gc_cutoffs_histo = StorageTimeMetrics::new(
3135 928 : StorageTimeOperation::FindGcCutoffs,
3136 928 : &tenant_id,
3137 928 : &shard_id,
3138 928 : &timeline_id,
3139 928 : );
3140 928 : let last_record_lsn_gauge = LAST_RECORD_LSN
3141 928 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3142 928 : .unwrap();
3143 928 :
3144 928 : let disk_consistent_lsn_gauge = DISK_CONSISTENT_LSN
3145 928 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3146 928 : .unwrap();
3147 928 :
3148 928 : let pitr_history_size = PITR_HISTORY_SIZE
3149 928 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3150 928 : .unwrap();
3151 928 :
3152 928 : let archival_size = TIMELINE_ARCHIVE_SIZE
3153 928 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3154 928 : .unwrap();
3155 928 :
3156 928 : let layers_per_read = LAYERS_PER_READ
3157 928 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3158 928 : .unwrap();
3159 928 :
3160 928 : let standby_horizon_gauge = STANDBY_HORIZON
3161 928 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3162 928 : .unwrap();
3163 928 : let resident_physical_size_gauge = RESIDENT_PHYSICAL_SIZE
3164 928 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3165 928 : .unwrap();
3166 928 : let visible_physical_size_gauge = VISIBLE_PHYSICAL_SIZE
3167 928 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3168 928 : .unwrap();
3169 928 : // TODO: we shouldn't expose this metric
3170 928 : let current_logical_size_gauge = CURRENT_LOGICAL_SIZE
3171 928 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3172 928 : .unwrap();
3173 928 : let aux_file_size_gauge = AUX_FILE_SIZE
3174 928 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3175 928 : .unwrap();
3176 928 : // TODO use impl Trait syntax here once we have ability to use it: https://github.com/rust-lang/rust/issues/63065
3177 928 : let directory_entries_count_gauge_closure = {
3178 928 : let tenant_shard_id = *tenant_shard_id;
3179 928 : let timeline_id_raw = *timeline_id_raw;
3180 0 : move || {
3181 0 : let tenant_id = tenant_shard_id.tenant_id.to_string();
3182 0 : let shard_id = format!("{}", tenant_shard_id.shard_slug());
3183 0 : let timeline_id = timeline_id_raw.to_string();
3184 0 : let gauge: UIntGauge = DIRECTORY_ENTRIES_COUNT
3185 0 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3186 0 : .unwrap();
3187 0 : gauge
3188 0 : }
3189 : };
3190 928 : let directory_entries_count_gauge: Lazy<UIntGauge, Box<dyn Send + Fn() -> UIntGauge>> =
3191 928 : Lazy::new(Box::new(directory_entries_count_gauge_closure));
3192 928 : let evictions = EVICTIONS
3193 928 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3194 928 : .unwrap();
3195 928 : let evictions_with_low_residence_duration = evictions_with_low_residence_duration_builder
3196 928 : .build(&tenant_id, &shard_id, &timeline_id);
3197 928 :
3198 928 : let valid_lsn_lease_count_gauge = VALID_LSN_LEASE_COUNT
3199 928 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3200 928 : .unwrap();
3201 928 :
3202 928 : let wal_records_received = PAGESERVER_TIMELINE_WAL_RECORDS_RECEIVED
3203 928 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3204 928 : .unwrap();
3205 928 :
3206 928 : let storage_io_size = StorageIoSizeMetrics::new(&tenant_id, &shard_id, &timeline_id);
3207 928 :
3208 928 : let wait_lsn_in_progress_micros = GlobalAndPerTenantIntCounter {
3209 928 : global: WAIT_LSN_IN_PROGRESS_GLOBAL_MICROS.clone(),
3210 928 : per_tenant: WAIT_LSN_IN_PROGRESS_MICROS
3211 928 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3212 928 : .unwrap(),
3213 928 : };
3214 928 :
3215 928 : let wait_lsn_start_finish_counterpair = WAIT_LSN_START_FINISH_COUNTERPAIR
3216 928 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3217 928 : .unwrap();
3218 928 :
3219 928 : let wait_ondemand_download_time =
3220 928 : wait_ondemand_download_time::WaitOndemandDownloadTimeSum::new(
3221 928 : &tenant_id,
3222 928 : &shard_id,
3223 928 : &timeline_id,
3224 928 : );
3225 928 :
3226 928 : TimelineMetrics {
3227 928 : tenant_id,
3228 928 : shard_id,
3229 928 : timeline_id,
3230 928 : flush_time_histo,
3231 928 : flush_delay_histo,
3232 928 : compact_time_histo,
3233 928 : create_images_time_histo,
3234 928 : logical_size_histo,
3235 928 : imitate_logical_size_histo,
3236 928 : garbage_collect_histo,
3237 928 : find_gc_cutoffs_histo,
3238 928 : load_layer_map_histo,
3239 928 : last_record_lsn_gauge,
3240 928 : disk_consistent_lsn_gauge,
3241 928 : pitr_history_size,
3242 928 : archival_size,
3243 928 : layers_per_read,
3244 928 : standby_horizon_gauge,
3245 928 : resident_physical_size_gauge,
3246 928 : visible_physical_size_gauge,
3247 928 : current_logical_size_gauge,
3248 928 : aux_file_size_gauge,
3249 928 : directory_entries_count_gauge,
3250 928 : evictions,
3251 928 : evictions_with_low_residence_duration: std::sync::RwLock::new(
3252 928 : evictions_with_low_residence_duration,
3253 928 : ),
3254 928 : storage_io_size,
3255 928 : valid_lsn_lease_count_gauge,
3256 928 : wal_records_received,
3257 928 : wait_lsn_in_progress_micros,
3258 928 : wait_lsn_start_finish_counterpair,
3259 928 : wait_ondemand_download_time,
3260 928 : shutdown: std::sync::atomic::AtomicBool::default(),
3261 928 : }
3262 928 : }
3263 :
3264 3168 : pub(crate) fn record_new_file_metrics(&self, sz: u64) {
3265 3168 : self.resident_physical_size_add(sz);
3266 3168 : }
3267 :
3268 1076 : pub(crate) fn resident_physical_size_sub(&self, sz: u64) {
3269 1076 : self.resident_physical_size_gauge.sub(sz);
3270 1076 : crate::metrics::RESIDENT_PHYSICAL_SIZE_GLOBAL.sub(sz);
3271 1076 : }
3272 :
3273 3440 : pub(crate) fn resident_physical_size_add(&self, sz: u64) {
3274 3440 : self.resident_physical_size_gauge.add(sz);
3275 3440 : crate::metrics::RESIDENT_PHYSICAL_SIZE_GLOBAL.add(sz);
3276 3440 : }
3277 :
3278 20 : pub(crate) fn resident_physical_size_get(&self) -> u64 {
3279 20 : self.resident_physical_size_gauge.get()
3280 20 : }
3281 :
3282 : /// Generates TIMELINE_LAYER labels for a persistent layer.
3283 5280 : fn make_layer_labels(&self, layer_desc: &PersistentLayerDesc) -> [&str; 5] {
3284 5280 : let level = match LayerMap::is_l0(&layer_desc.key_range, layer_desc.is_delta()) {
3285 2847 : true => LayerLevel::L0,
3286 2433 : false => LayerLevel::L1,
3287 : };
3288 5280 : let kind = match layer_desc.is_delta() {
3289 4372 : true => LayerKind::Delta,
3290 908 : false => LayerKind::Image,
3291 : };
3292 5280 : [
3293 5280 : &self.tenant_id,
3294 5280 : &self.shard_id,
3295 5280 : &self.timeline_id,
3296 5280 : level.into(),
3297 5280 : kind.into(),
3298 5280 : ]
3299 5280 : }
3300 :
3301 : /// Generates TIMELINE_LAYER labels for a frozen ephemeral layer.
3302 4736 : fn make_frozen_layer_labels(&self, _layer: &InMemoryLayer) -> [&str; 5] {
3303 4736 : [
3304 4736 : &self.tenant_id,
3305 4736 : &self.shard_id,
3306 4736 : &self.timeline_id,
3307 4736 : LayerLevel::Frozen.into(),
3308 4736 : LayerKind::Delta.into(), // by definition
3309 4736 : ]
3310 4736 : }
3311 :
3312 : /// Removes a frozen ephemeral layer from TIMELINE_LAYER metrics.
3313 2368 : pub fn dec_frozen_layer(&self, layer: &InMemoryLayer) {
3314 2368 : assert!(matches!(layer.info(), InMemoryLayerInfo::Frozen { .. }));
3315 2368 : let labels = self.make_frozen_layer_labels(layer);
3316 2368 : let size = layer.try_len().expect("frozen layer should have no writer");
3317 2368 : TIMELINE_LAYER_COUNT
3318 2368 : .get_metric_with_label_values(&labels)
3319 2368 : .unwrap()
3320 2368 : .dec();
3321 2368 : TIMELINE_LAYER_SIZE
3322 2368 : .get_metric_with_label_values(&labels)
3323 2368 : .unwrap()
3324 2368 : .sub(size);
3325 2368 : }
3326 :
3327 : /// Adds a frozen ephemeral layer to TIMELINE_LAYER metrics.
3328 2368 : pub fn inc_frozen_layer(&self, layer: &InMemoryLayer) {
3329 2368 : assert!(matches!(layer.info(), InMemoryLayerInfo::Frozen { .. }));
3330 2368 : let labels = self.make_frozen_layer_labels(layer);
3331 2368 : let size = layer.try_len().expect("frozen layer should have no writer");
3332 2368 : TIMELINE_LAYER_COUNT
3333 2368 : .get_metric_with_label_values(&labels)
3334 2368 : .unwrap()
3335 2368 : .inc();
3336 2368 : TIMELINE_LAYER_SIZE
3337 2368 : .get_metric_with_label_values(&labels)
3338 2368 : .unwrap()
3339 2368 : .add(size);
3340 2368 : }
3341 :
3342 : /// Removes a persistent layer from TIMELINE_LAYER metrics.
3343 1376 : pub fn dec_layer(&self, layer_desc: &PersistentLayerDesc) {
3344 1376 : let labels = self.make_layer_labels(layer_desc);
3345 1376 : TIMELINE_LAYER_COUNT
3346 1376 : .get_metric_with_label_values(&labels)
3347 1376 : .unwrap()
3348 1376 : .dec();
3349 1376 : TIMELINE_LAYER_SIZE
3350 1376 : .get_metric_with_label_values(&labels)
3351 1376 : .unwrap()
3352 1376 : .sub(layer_desc.file_size);
3353 1376 : }
3354 :
3355 : /// Adds a persistent layer to TIMELINE_LAYER metrics.
3356 3904 : pub fn inc_layer(&self, layer_desc: &PersistentLayerDesc) {
3357 3904 : let labels = self.make_layer_labels(layer_desc);
3358 3904 : TIMELINE_LAYER_COUNT
3359 3904 : .get_metric_with_label_values(&labels)
3360 3904 : .unwrap()
3361 3904 : .inc();
3362 3904 : TIMELINE_LAYER_SIZE
3363 3904 : .get_metric_with_label_values(&labels)
3364 3904 : .unwrap()
3365 3904 : .add(layer_desc.file_size);
3366 3904 : }
3367 :
3368 20 : pub(crate) fn shutdown(&self) {
3369 20 : let was_shutdown = self
3370 20 : .shutdown
3371 20 : .swap(true, std::sync::atomic::Ordering::Relaxed);
3372 20 :
3373 20 : if was_shutdown {
3374 : // this happens on tenant deletion because the tenant first shuts down its timelines, then
3375 : // invokes timeline deletion, which shuts down the timeline again.
3376 : // TODO: this can be removed once https://github.com/neondatabase/neon/issues/5080
3377 0 : return;
3378 20 : }
3379 20 :
3380 20 : let tenant_id = &self.tenant_id;
3381 20 : let timeline_id = &self.timeline_id;
3382 20 : let shard_id = &self.shard_id;
3383 20 : let _ = LAST_RECORD_LSN.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3384 20 : let _ = DISK_CONSISTENT_LSN.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3385 20 : let _ = STANDBY_HORIZON.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3386 20 : {
3387 20 : RESIDENT_PHYSICAL_SIZE_GLOBAL.sub(self.resident_physical_size_get());
3388 20 : let _ = RESIDENT_PHYSICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3389 20 : }
3390 20 : let _ = VISIBLE_PHYSICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3391 20 : let _ = CURRENT_LOGICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3392 20 : if let Some(metric) = Lazy::get(&DIRECTORY_ENTRIES_COUNT) {
3393 0 : let _ = metric.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3394 20 : }
3395 :
3396 20 : let _ = TIMELINE_ARCHIVE_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3397 20 : let _ = PITR_HISTORY_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3398 :
3399 80 : for ref level in LayerLevel::iter() {
3400 180 : for ref kind in LayerKind::iter() {
3401 120 : let labels: [&str; 5] =
3402 120 : [tenant_id, shard_id, timeline_id, level.into(), kind.into()];
3403 120 : let _ = TIMELINE_LAYER_SIZE.remove_label_values(&labels);
3404 120 : let _ = TIMELINE_LAYER_COUNT.remove_label_values(&labels);
3405 120 : }
3406 : }
3407 :
3408 20 : let _ = LAYERS_PER_READ.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3409 20 :
3410 20 : let _ = EVICTIONS.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3411 20 : let _ = AUX_FILE_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3412 20 : let _ = VALID_LSN_LEASE_COUNT.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3413 20 :
3414 20 : self.evictions_with_low_residence_duration
3415 20 : .write()
3416 20 : .unwrap()
3417 20 : .remove(tenant_id, shard_id, timeline_id);
3418 :
3419 : // The following metrics are created outside of the TimelineMetrics lifecycle but are still
3420 : // removed at the end of it. The idea is that the metrics outlive the
3421 : // entity they are observed for, e.g., the smgr metrics should
3422 : // outlive an individual smgr connection, but not the timeline.
3423 :
3424 200 : for op in StorageTimeOperation::VARIANTS {
3425 180 : let _ = STORAGE_TIME_SUM_PER_TIMELINE.remove_label_values(&[
3426 180 : op,
3427 180 : tenant_id,
3428 180 : shard_id,
3429 180 : timeline_id,
3430 180 : ]);
3431 180 : let _ = STORAGE_TIME_COUNT_PER_TIMELINE.remove_label_values(&[
3432 180 : op,
3433 180 : tenant_id,
3434 180 : shard_id,
3435 180 : timeline_id,
3436 180 : ]);
3437 180 : }
3438 :
3439 60 : for op in StorageIoSizeOperation::VARIANTS {
3440 40 : let _ = STORAGE_IO_SIZE.remove_label_values(&[op, tenant_id, shard_id, timeline_id]);
3441 40 : }
3442 :
3443 : let _ =
3444 20 : WAIT_LSN_IN_PROGRESS_MICROS.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3445 20 :
3446 20 : {
3447 20 : let mut res = [Ok(()), Ok(())];
3448 20 : WAIT_LSN_START_FINISH_COUNTERPAIR
3449 20 : .remove_label_values(&mut res, &[tenant_id, shard_id, timeline_id]);
3450 20 : }
3451 20 :
3452 20 : wait_ondemand_download_time::shutdown_timeline(tenant_id, shard_id, timeline_id);
3453 20 :
3454 20 : let _ = SMGR_QUERY_STARTED_PER_TENANT_TIMELINE.remove_label_values(&[
3455 20 : SmgrQueryType::GetPageAtLsn.into(),
3456 20 : tenant_id,
3457 20 : shard_id,
3458 20 : timeline_id,
3459 20 : ]);
3460 20 : let _ = SMGR_QUERY_TIME_PER_TENANT_TIMELINE.remove_label_values(&[
3461 20 : SmgrQueryType::GetPageAtLsn.into(),
3462 20 : tenant_id,
3463 20 : shard_id,
3464 20 : timeline_id,
3465 20 : ]);
3466 20 : let _ = PAGE_SERVICE_BATCH_SIZE_PER_TENANT_TIMELINE.remove_label_values(&[
3467 20 : tenant_id,
3468 20 : shard_id,
3469 20 : timeline_id,
3470 20 : ]);
3471 20 : let _ = PAGESERVER_TIMELINE_WAL_RECORDS_RECEIVED.remove_label_values(&[
3472 20 : tenant_id,
3473 20 : shard_id,
3474 20 : timeline_id,
3475 20 : ]);
3476 20 : let _ = PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS.remove_label_values(&[
3477 20 : tenant_id,
3478 20 : shard_id,
3479 20 : timeline_id,
3480 20 : ]);
3481 20 : let _ = PAGE_SERVICE_SMGR_BATCH_WAIT_TIME.remove_label_values(&[
3482 20 : tenant_id,
3483 20 : shard_id,
3484 20 : timeline_id,
3485 20 : ]);
3486 :
3487 160 : for reason in GetPageBatchBreakReason::iter() {
3488 140 : let _ = PAGE_SERVICE_BATCH_BREAK_REASON_PER_TENANT_TIMELINE.remove_label_values(&[
3489 140 : tenant_id,
3490 140 : shard_id,
3491 140 : timeline_id,
3492 140 : reason.into(),
3493 140 : ]);
3494 140 : }
3495 20 : }
3496 : }
3497 :
3498 12 : pub(crate) fn remove_tenant_metrics(tenant_shard_id: &TenantShardId) {
3499 12 : // Only shard zero deals in synthetic sizes
3500 12 : if tenant_shard_id.is_shard_zero() {
3501 12 : let tid = tenant_shard_id.tenant_id.to_string();
3502 12 : let _ = TENANT_SYNTHETIC_SIZE_METRIC.remove_label_values(&[&tid]);
3503 12 : }
3504 :
3505 12 : tenant_throttling::remove_tenant_metrics(tenant_shard_id);
3506 12 :
3507 12 : // we leave the BROKEN_TENANTS_SET entry if any
3508 12 : }
3509 :
3510 : /// Maintain a per timeline gauge in addition to the global gauge.
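: ///
: /// The global gauge is kept consistent by applying only the delta between the previously
: /// set value and the new one; the remainder is subtracted when the gauge is dropped.
: ///
: /// # Example (illustrative values)
: ///
: /// ```ignore
: /// gauge.set(100); // per-timeline gauge = 100, global gauge += 100
: /// gauge.set(40);  // per-timeline gauge = 40,  global gauge -= 60
: /// drop(gauge);    // global gauge -= 40, removing this timeline's contribution
: /// ```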
3511 : pub(crate) struct PerTimelineRemotePhysicalSizeGauge {
3512 : last_set: AtomicU64,
3513 : gauge: UIntGauge,
3514 : }
3515 :
3516 : impl PerTimelineRemotePhysicalSizeGauge {
3517 948 : fn new(per_timeline_gauge: UIntGauge) -> Self {
3518 948 : Self {
3519 948 : last_set: AtomicU64::new(0),
3520 948 : gauge: per_timeline_gauge,
3521 948 : }
3522 948 : }
3523 3884 : pub(crate) fn set(&self, sz: u64) {
3524 3884 : self.gauge.set(sz);
3525 3884 : let prev = self.last_set.swap(sz, std::sync::atomic::Ordering::Relaxed);
3526 3884 : if sz < prev {
3527 73 : REMOTE_PHYSICAL_SIZE_GLOBAL.sub(prev - sz);
3528 3811 : } else {
3529 3811 : REMOTE_PHYSICAL_SIZE_GLOBAL.add(sz - prev);
3530 3811 : };
3531 3884 : }
3532 4 : pub(crate) fn get(&self) -> u64 {
3533 4 : self.gauge.get()
3534 4 : }
3535 : }
3536 :
3537 : impl Drop for PerTimelineRemotePhysicalSizeGauge {
3538 40 : fn drop(&mut self) {
3539 40 : REMOTE_PHYSICAL_SIZE_GLOBAL.sub(self.last_set.load(std::sync::atomic::Ordering::Relaxed));
3540 40 : }
3541 : }
3542 :
3543 : pub(crate) struct RemoteTimelineClientMetrics {
3544 : tenant_id: String,
3545 : shard_id: String,
3546 : timeline_id: String,
3547 : pub(crate) remote_physical_size_gauge: PerTimelineRemotePhysicalSizeGauge,
3548 : calls: Mutex<HashMap<(&'static str, &'static str), IntCounterPair>>,
3549 : bytes_started_counter: Mutex<HashMap<(&'static str, &'static str), IntCounter>>,
3550 : bytes_finished_counter: Mutex<HashMap<(&'static str, &'static str), IntCounter>>,
3551 : pub(crate) projected_remote_consistent_lsn_gauge: UIntGauge,
3552 : }
3553 :
3554 : impl RemoteTimelineClientMetrics {
3555 948 : pub fn new(tenant_shard_id: &TenantShardId, timeline_id: &TimelineId) -> Self {
3556 948 : let tenant_id_str = tenant_shard_id.tenant_id.to_string();
3557 948 : let shard_id_str = format!("{}", tenant_shard_id.shard_slug());
3558 948 : let timeline_id_str = timeline_id.to_string();
3559 948 :
3560 948 : let remote_physical_size_gauge = PerTimelineRemotePhysicalSizeGauge::new(
3561 948 : REMOTE_PHYSICAL_SIZE
3562 948 : .get_metric_with_label_values(&[&tenant_id_str, &shard_id_str, &timeline_id_str])
3563 948 : .unwrap(),
3564 948 : );
3565 948 :
3566 948 : let projected_remote_consistent_lsn_gauge = PROJECTED_REMOTE_CONSISTENT_LSN
3567 948 : .get_metric_with_label_values(&[&tenant_id_str, &shard_id_str, &timeline_id_str])
3568 948 : .unwrap();
3569 948 :
3570 948 : RemoteTimelineClientMetrics {
3571 948 : tenant_id: tenant_id_str,
3572 948 : shard_id: shard_id_str,
3573 948 : timeline_id: timeline_id_str,
3574 948 : calls: Mutex::new(HashMap::default()),
3575 948 : bytes_started_counter: Mutex::new(HashMap::default()),
3576 948 : bytes_finished_counter: Mutex::new(HashMap::default()),
3577 948 : remote_physical_size_gauge,
3578 948 : projected_remote_consistent_lsn_gauge,
3579 948 : }
3580 948 : }
3581 :
3582 6146 : pub fn remote_operation_time(
3583 6146 : &self,
3584 6146 : task_kind: Option<TaskKind>,
3585 6146 : file_kind: &RemoteOpFileKind,
3586 6146 : op_kind: &RemoteOpKind,
3587 6146 : status: &'static str,
3588 6146 : ) -> Histogram {
3589 6146 : REMOTE_TIMELINE_CLIENT_COMPLETION_LATENCY
3590 6146 : .get_metric_with_label_values(&[
3591 6146 : task_kind.as_ref().map(|tk| tk.into()).unwrap_or("unknown"),
3592 6146 : file_kind.as_str(),
3593 6146 : op_kind.as_str(),
3594 6146 : status,
3595 6146 : ])
3596 6146 : .unwrap()
3597 6146 : }
3598 :
3599 14487 : fn calls_counter_pair(
3600 14487 : &self,
3601 14487 : file_kind: &RemoteOpFileKind,
3602 14487 : op_kind: &RemoteOpKind,
3603 14487 : ) -> IntCounterPair {
3604 14487 : let mut guard = self.calls.lock().unwrap();
3605 14487 : let key = (file_kind.as_str(), op_kind.as_str());
3606 14487 : let metric = guard.entry(key).or_insert_with(move || {
3607 1698 : REMOTE_TIMELINE_CLIENT_CALLS
3608 1698 : .get_metric_with_label_values(&[
3609 1698 : &self.tenant_id,
3610 1698 : &self.shard_id,
3611 1698 : &self.timeline_id,
3612 1698 : key.0,
3613 1698 : key.1,
3614 1698 : ])
3615 1698 : .unwrap()
3616 14487 : });
3617 14487 : metric.clone()
3618 14487 : }
3619 :
3620 3528 : fn bytes_started_counter(
3621 3528 : &self,
3622 3528 : file_kind: &RemoteOpFileKind,
3623 3528 : op_kind: &RemoteOpKind,
3624 3528 : ) -> IntCounter {
3625 3528 : let mut guard = self.bytes_started_counter.lock().unwrap();
3626 3528 : let key = (file_kind.as_str(), op_kind.as_str());
3627 3528 : let metric = guard.entry(key).or_insert_with(move || {
3628 668 : REMOTE_TIMELINE_CLIENT_BYTES_STARTED_COUNTER
3629 668 : .get_metric_with_label_values(&[
3630 668 : &self.tenant_id,
3631 668 : &self.shard_id,
3632 668 : &self.timeline_id,
3633 668 : key.0,
3634 668 : key.1,
3635 668 : ])
3636 668 : .unwrap()
3637 3528 : });
3638 3528 : metric.clone()
3639 3528 : }
3640 :
3641 6650 : fn bytes_finished_counter(
3642 6650 : &self,
3643 6650 : file_kind: &RemoteOpFileKind,
3644 6650 : op_kind: &RemoteOpKind,
3645 6650 : ) -> IntCounter {
3646 6650 : let mut guard = self.bytes_finished_counter.lock().unwrap();
3647 6650 : let key = (file_kind.as_str(), op_kind.as_str());
3648 6650 : let metric = guard.entry(key).or_insert_with(move || {
3649 668 : REMOTE_TIMELINE_CLIENT_BYTES_FINISHED_COUNTER
3650 668 : .get_metric_with_label_values(&[
3651 668 : &self.tenant_id,
3652 668 : &self.shard_id,
3653 668 : &self.timeline_id,
3654 668 : key.0,
3655 668 : key.1,
3656 668 : ])
3657 668 : .unwrap()
3658 6650 : });
3659 6650 : metric.clone()
3660 6650 : }
3661 : }
3662 :
3663 : #[cfg(test)]
3664 : impl RemoteTimelineClientMetrics {
3665 12 : pub fn get_bytes_started_counter_value(
3666 12 : &self,
3667 12 : file_kind: &RemoteOpFileKind,
3668 12 : op_kind: &RemoteOpKind,
3669 12 : ) -> Option<u64> {
3670 12 : let guard = self.bytes_started_counter.lock().unwrap();
3671 12 : let key = (file_kind.as_str(), op_kind.as_str());
3672 12 : guard.get(&key).map(|counter| counter.get())
3673 12 : }
3674 :
3675 12 : pub fn get_bytes_finished_counter_value(
3676 12 : &self,
3677 12 : file_kind: &RemoteOpFileKind,
3678 12 : op_kind: &RemoteOpKind,
3679 12 : ) -> Option<u64> {
3680 12 : let guard = self.bytes_finished_counter.lock().unwrap();
3681 12 : let key = (file_kind.as_str(), op_kind.as_str());
3682 12 : guard.get(&key).map(|counter| counter.get())
3683 12 : }
3684 : }
3685 :
3686 : /// See [`RemoteTimelineClientMetrics::call_begin`].
3687 : #[must_use]
3688 : pub(crate) struct RemoteTimelineClientCallMetricGuard {
3689 : /// Decremented on drop.
3690 : calls_counter_pair: Option<IntCounterPair>,
3691 : /// If Some(), this references the bytes_finished metric, and we increment it by the given `u64` on drop.
3692 : bytes_finished: Option<(IntCounter, u64)>,
3693 : }
3694 :
3695 : impl RemoteTimelineClientCallMetricGuard {
3696 : /// Consume this guard object without performing the metric updates it would do on `drop()`.
3697 : /// The caller vouches to do the metric updates manually.
3698 7660 : pub fn will_decrement_manually(mut self) {
3699 7660 : let RemoteTimelineClientCallMetricGuard {
3700 7660 : calls_counter_pair,
3701 7660 : bytes_finished,
3702 7660 : } = &mut self;
3703 7660 : calls_counter_pair.take();
3704 7660 : bytes_finished.take();
3705 7660 : }
3706 : }
3707 :
3708 : impl Drop for RemoteTimelineClientCallMetricGuard {
3709 7728 : fn drop(&mut self) {
3710 7728 : let RemoteTimelineClientCallMetricGuard {
3711 7728 : calls_counter_pair,
3712 7728 : bytes_finished,
3713 7728 : } = self;
3714 7728 : if let Some(guard) = calls_counter_pair.take() {
3715 68 : guard.dec();
3716 7660 : }
3717 7728 : if let Some((bytes_finished_metric, value)) = bytes_finished {
3718 0 : bytes_finished_metric.inc_by(*value);
3719 7728 : }
3720 7728 : }
3721 : }
3722 :
3723 : /// The enum variants communicate to the [`RemoteTimelineClientMetrics`] whether to
3724 : /// track the byte size of this call in applicable metric(s).
3725 : pub(crate) enum RemoteTimelineClientMetricsCallTrackSize {
3726 : /// Do not account for this call's byte size in any metrics.
3727 : /// The `reason` field is there to make the call sites self-documenting
3728 : /// about why they don't need the metric.
3729 : DontTrackSize { reason: &'static str },
3730 : /// Track the byte size of the call in applicable metric(s).
3731 : Bytes(u64),
3732 : }
3733 :
3734 : impl RemoteTimelineClientMetrics {
3735 : /// Update the metrics that change when a call to the remote timeline client instance starts.
3736 : ///
3737 : /// Drop the returned guard object once the operation is finished to update the corresponding metrics that track completions.
3738 : /// Or, use [`RemoteTimelineClientCallMetricGuard::will_decrement_manually`] and [`call_end`](Self::call_end) if that
3739 : /// is more suitable.
3740 : /// Never do both.
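: ///
: /// # Usage sketch
: ///
: /// Illustrative only; `upload()`, `file_kind`, `op_kind` and `layer_file_size` are
: /// stand-ins for whatever the call site has at hand.
: ///
: /// ```ignore
: /// let guard = metrics.call_begin(
: ///     &file_kind,
: ///     &op_kind,
: ///     RemoteTimelineClientMetricsCallTrackSize::Bytes(layer_file_size),
: /// );
: /// upload().await?;
: /// drop(guard); // closes the calls counter pair and adds the byte size to bytes_finished
: /// ```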
3741 7728 : pub(crate) fn call_begin(
3742 7728 : &self,
3743 7728 : file_kind: &RemoteOpFileKind,
3744 7728 : op_kind: &RemoteOpKind,
3745 7728 : size: RemoteTimelineClientMetricsCallTrackSize,
3746 7728 : ) -> RemoteTimelineClientCallMetricGuard {
3747 7728 : let calls_counter_pair = self.calls_counter_pair(file_kind, op_kind);
3748 7728 : calls_counter_pair.inc();
3749 :
3750 7728 : let bytes_finished = match size {
3751 4200 : RemoteTimelineClientMetricsCallTrackSize::DontTrackSize { reason: _reason } => {
3752 4200 : // nothing to do
3753 4200 : None
3754 : }
3755 3528 : RemoteTimelineClientMetricsCallTrackSize::Bytes(size) => {
3756 3528 : self.bytes_started_counter(file_kind, op_kind).inc_by(size);
3757 3528 : let finished_counter = self.bytes_finished_counter(file_kind, op_kind);
3758 3528 : Some((finished_counter, size))
3759 : }
3760 : };
3761 7728 : RemoteTimelineClientCallMetricGuard {
3762 7728 : calls_counter_pair: Some(calls_counter_pair),
3763 7728 : bytes_finished,
3764 7728 : }
3765 7728 : }
3766 :
3767 : /// Manually update the metrics that track completions, instead of using the guard object.
3768 : /// Using the guard object is generally preferable.
3769 : /// See [`call_begin`](Self::call_begin) for more context.
3770 6759 : pub(crate) fn call_end(
3771 6759 : &self,
3772 6759 : file_kind: &RemoteOpFileKind,
3773 6759 : op_kind: &RemoteOpKind,
3774 6759 : size: RemoteTimelineClientMetricsCallTrackSize,
3775 6759 : ) {
3776 6759 : let calls_counter_pair = self.calls_counter_pair(file_kind, op_kind);
3777 6759 : calls_counter_pair.dec();
3778 6759 : match size {
3779 3637 : RemoteTimelineClientMetricsCallTrackSize::DontTrackSize { reason: _reason } => {}
3780 3122 : RemoteTimelineClientMetricsCallTrackSize::Bytes(size) => {
3781 3122 : self.bytes_finished_counter(file_kind, op_kind).inc_by(size);
3782 3122 : }
3783 : }
3784 6759 : }
3785 : }
3786 :
3787 : impl Drop for RemoteTimelineClientMetrics {
3788 40 : fn drop(&mut self) {
3789 40 : let RemoteTimelineClientMetrics {
3790 40 : tenant_id,
3791 40 : shard_id,
3792 40 : timeline_id,
3793 40 : remote_physical_size_gauge,
3794 40 : calls,
3795 40 : bytes_started_counter,
3796 40 : bytes_finished_counter,
3797 40 : projected_remote_consistent_lsn_gauge,
3798 40 : } = self;
3799 48 : for ((a, b), _) in calls.get_mut().unwrap().drain() {
3800 48 : let mut res = [Ok(()), Ok(())];
3801 48 : REMOTE_TIMELINE_CLIENT_CALLS
3802 48 : .remove_label_values(&mut res, &[tenant_id, shard_id, timeline_id, a, b]);
3803 48 : // don't care about results
3804 48 : }
3805 40 : for ((a, b), _) in bytes_started_counter.get_mut().unwrap().drain() {
3806 12 : let _ = REMOTE_TIMELINE_CLIENT_BYTES_STARTED_COUNTER.remove_label_values(&[
3807 12 : tenant_id,
3808 12 : shard_id,
3809 12 : timeline_id,
3810 12 : a,
3811 12 : b,
3812 12 : ]);
3813 12 : }
3814 40 : for ((a, b), _) in bytes_finished_counter.get_mut().unwrap().drain() {
3815 12 : let _ = REMOTE_TIMELINE_CLIENT_BYTES_FINISHED_COUNTER.remove_label_values(&[
3816 12 : tenant_id,
3817 12 : shard_id,
3818 12 : timeline_id,
3819 12 : a,
3820 12 : b,
3821 12 : ]);
3822 12 : }
3823 40 : {
3824 40 : let _ = remote_physical_size_gauge; // used to avoid an 'unused' warning in the destructuring above
3825 40 : let _ = REMOTE_PHYSICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3826 40 : }
3827 40 : {
3828 40 : let _ = projected_remote_consistent_lsn_gauge;
3829 40 : let _ = PROJECTED_REMOTE_CONSISTENT_LSN.remove_label_values(&[
3830 40 : tenant_id,
3831 40 : shard_id,
3832 40 : timeline_id,
3833 40 : ]);
3834 40 : }
3835 40 : }
3836 : }
3837 :
3838 : /// Wrapper future that measures the time spent by a remote storage operation,
3839 : /// and records the time and success/failure as a prometheus metric.
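: ///
: /// # Usage sketch
: ///
: /// Illustrative only; `do_upload()` is a stand-in future, and the file/op kind variants
: /// shown are assumed to be defined elsewhere in this module.
: ///
: /// ```ignore
: /// let res = do_upload()
: ///     .measure_remote_op(None, RemoteOpFileKind::Layer, RemoteOpKind::Upload, metrics)
: ///     .await;
: /// ```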
3840 : pub(crate) trait MeasureRemoteOp<O, E>: Sized + Future<Output = Result<O, E>> {
3841 6455 : async fn measure_remote_op(
3842 6455 : self,
3843 6455 : task_kind: Option<TaskKind>, // not all caller contexts have a RequestContext / TaskKind handy
3844 6455 : file_kind: RemoteOpFileKind,
3845 6455 : op: RemoteOpKind,
3846 6455 : metrics: Arc<RemoteTimelineClientMetrics>,
3847 6455 : ) -> Result<O, E> {
3848 6455 : let start = Instant::now();
3849 6455 : let res = self.await;
3850 6146 : let duration = start.elapsed();
3851 6146 : let status = if res.is_ok() { &"success" } else { &"failure" };
3852 6146 : metrics
3853 6146 : .remote_operation_time(task_kind, &file_kind, &op, status)
3854 6146 : .observe(duration.as_secs_f64());
3855 6146 : res
3856 6146 : }
3857 : }
3858 :
3859 : impl<Fut, O, E> MeasureRemoteOp<O, E> for Fut where Fut: Sized + Future<Output = Result<O, E>> {}
3860 :
3861 : pub mod tokio_epoll_uring {
3862 : use std::collections::HashMap;
3863 : use std::sync::{Arc, Mutex};
3864 :
3865 : use metrics::{Histogram, LocalHistogram, UIntGauge, register_histogram, register_int_counter};
3866 : use once_cell::sync::Lazy;
3867 :
3868 : /// Shared storage for tokio-epoll-uring thread local metrics.
3869 : pub(crate) static THREAD_LOCAL_METRICS_STORAGE: Lazy<ThreadLocalMetricsStorage> =
3870 244 : Lazy::new(|| {
3871 244 : let slots_submission_queue_depth = register_histogram!(
3872 244 : "pageserver_tokio_epoll_uring_slots_submission_queue_depth",
3873 244 : "The slots waiters queue depth of each tokio_epoll_uring system",
3874 244 : vec![
3875 244 : 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0
3876 244 : ],
3877 244 : )
3878 244 : .expect("failed to define a metric");
3879 244 : ThreadLocalMetricsStorage {
3880 244 : observers: Mutex::new(HashMap::new()),
3881 244 : slots_submission_queue_depth,
3882 244 : }
3883 244 : });
3884 :
3885 : pub struct ThreadLocalMetricsStorage {
3886 : /// List of thread local metrics observers.
3887 : observers: Mutex<HashMap<u64, Arc<ThreadLocalMetrics>>>,
3888 : /// A histogram shared between all thread local systems
3889 : /// for collecting slots submission queue depth.
3890 : slots_submission_queue_depth: Histogram,
3891 : }
3892 :
3893 : /// Each thread-local [`tokio_epoll_uring::System`] gets one of these as its
3894 : /// [`tokio_epoll_uring::metrics::PerSystemMetrics`] generic.
3895 : ///
3896 : /// The System makes observations into [`Self`] and periodically, the collector
3897 : /// comes along and flushes [`Self`] into the shared storage [`THREAD_LOCAL_METRICS_STORAGE`].
3898 : ///
3899 : /// [`LocalHistogram`] is `!Send`, so we need to put it behind a [`Mutex`].
3900 : /// But except for the periodic flush, the lock is uncontended, so there's no waiting
3901 : /// for the cache coherence protocol to grant an exclusive cache line.
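: ///
: /// # Lifecycle sketch (illustrative)
: ///
: /// ```ignore
: /// let local = THREAD_LOCAL_METRICS_STORAGE.register_system(system_id);
: /// // ... the thread-local system observes slot queue depths into `local` ...
: /// THREAD_LOCAL_METRICS_STORAGE.flush_thread_local_metrics(); // periodic, done by the collector
: /// THREAD_LOCAL_METRICS_STORAGE.remove_system(system_id);     // before the system is dropped
: /// ```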
3902 : pub struct ThreadLocalMetrics {
3903 : /// Local observer of thread local tokio-epoll-uring system's slots waiters queue depth.
3904 : slots_submission_queue_depth: Mutex<LocalHistogram>,
3905 : }
3906 :
3907 : impl ThreadLocalMetricsStorage {
3908 : /// Registers a new thread local system. Returns a thread local metrics observer.
3909 1004 : pub fn register_system(&self, id: u64) -> Arc<ThreadLocalMetrics> {
3910 1004 : let per_system_metrics = Arc::new(ThreadLocalMetrics::new(
3911 1004 : self.slots_submission_queue_depth.local(),
3912 1004 : ));
3913 1004 : let mut g = self.observers.lock().unwrap();
3914 1004 : g.insert(id, Arc::clone(&per_system_metrics));
3915 1004 : per_system_metrics
3916 1004 : }
3917 :
3918 : /// Removes metrics observer for a thread local system.
3919 : /// This should be called before dropping a thread local system.
3920 244 : pub fn remove_system(&self, id: u64) {
3921 244 : let mut g = self.observers.lock().unwrap();
3922 244 : g.remove(&id);
3923 244 : }
3924 :
3925 : /// Flush all thread local metrics to the shared storage.
3926 0 : pub fn flush_thread_local_metrics(&self) {
3927 0 : let g = self.observers.lock().unwrap();
3928 0 : g.values().for_each(|local| {
3929 0 : local.flush();
3930 0 : });
3931 0 : }
3932 : }
3933 :
3934 : impl ThreadLocalMetrics {
3935 1004 : pub fn new(slots_submission_queue_depth: LocalHistogram) -> Self {
3936 1004 : ThreadLocalMetrics {
3937 1004 : slots_submission_queue_depth: Mutex::new(slots_submission_queue_depth),
3938 1004 : }
3939 1004 : }
3940 :
3941 : /// Flushes the thread local metrics to shared aggregator.
3942 0 : pub fn flush(&self) {
3943 0 : let Self {
3944 0 : slots_submission_queue_depth,
3945 0 : } = self;
3946 0 : slots_submission_queue_depth.lock().unwrap().flush();
3947 0 : }
3948 : }
3949 :
3950 : impl tokio_epoll_uring::metrics::PerSystemMetrics for ThreadLocalMetrics {
3951 904860 : fn observe_slots_submission_queue_depth(&self, queue_depth: u64) {
3952 904860 : let Self {
3953 904860 : slots_submission_queue_depth,
3954 904860 : } = self;
3955 904860 : slots_submission_queue_depth
3956 904860 : .lock()
3957 904860 : .unwrap()
3958 904860 : .observe(queue_depth as f64);
3959 904860 : }
3960 : }
3961 :
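: /// Prometheus [`metrics::core::Collector`] that, on every scrape, refreshes the
: /// tokio-epoll-uring systems-created/destroyed gauges and flushes the thread-local
: /// queue-depth histograms into the shared storage before emitting them.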
3962 : pub struct Collector {
3963 : descs: Vec<metrics::core::Desc>,
3964 : systems_created: UIntGauge,
3965 : systems_destroyed: UIntGauge,
3966 : thread_local_metrics_storage: &'static ThreadLocalMetricsStorage,
3967 : }
3968 :
3969 : impl metrics::core::Collector for Collector {
3970 0 : fn desc(&self) -> Vec<&metrics::core::Desc> {
3971 0 : self.descs.iter().collect()
3972 0 : }
3973 :
3974 0 : fn collect(&self) -> Vec<metrics::proto::MetricFamily> {
3975 0 : let mut mfs = Vec::with_capacity(Self::NMETRICS);
3976 0 : let tokio_epoll_uring::metrics::GlobalMetrics {
3977 0 : systems_created,
3978 0 : systems_destroyed,
3979 0 : } = tokio_epoll_uring::metrics::global();
3980 0 : self.systems_created.set(systems_created);
3981 0 : mfs.extend(self.systems_created.collect());
3982 0 : self.systems_destroyed.set(systems_destroyed);
3983 0 : mfs.extend(self.systems_destroyed.collect());
3984 0 :
3985 0 : self.thread_local_metrics_storage
3986 0 : .flush_thread_local_metrics();
3987 0 :
3988 0 : mfs.extend(
3989 0 : self.thread_local_metrics_storage
3990 0 : .slots_submission_queue_depth
3991 0 : .collect(),
3992 0 : );
3993 0 : mfs
3994 0 : }
3995 : }
3996 :
3997 : impl Collector {
3998 : const NMETRICS: usize = 3;
3999 :
4000 : #[allow(clippy::new_without_default)]
4001 0 : pub fn new() -> Self {
4002 0 : let mut descs = Vec::new();
4003 0 :
4004 0 : let systems_created = UIntGauge::new(
4005 0 : "pageserver_tokio_epoll_uring_systems_created",
4006 0 : "counter of tokio-epoll-uring systems that were created",
4007 0 : )
4008 0 : .unwrap();
4009 0 : descs.extend(
4010 0 : metrics::core::Collector::desc(&systems_created)
4011 0 : .into_iter()
4012 0 : .cloned(),
4013 0 : );
4014 0 :
4015 0 : let systems_destroyed = UIntGauge::new(
4016 0 : "pageserver_tokio_epoll_uring_systems_destroyed",
4017 0 : "counter of tokio-epoll-uring systems that were destroyed",
4018 0 : )
4019 0 : .unwrap();
4020 0 : descs.extend(
4021 0 : metrics::core::Collector::desc(&systems_destroyed)
4022 0 : .into_iter()
4023 0 : .cloned(),
4024 0 : );
4025 0 :
4026 0 : Self {
4027 0 : descs,
4028 0 : systems_created,
4029 0 : systems_destroyed,
4030 0 : thread_local_metrics_storage: &THREAD_LOCAL_METRICS_STORAGE,
4031 0 : }
4032 0 : }
4033 : }
4034 :
4035 244 : pub(crate) static THREAD_LOCAL_LAUNCH_SUCCESSES: Lazy<metrics::IntCounter> = Lazy::new(|| {
4036 244 : register_int_counter!(
4037 244 : "pageserver_tokio_epoll_uring_pageserver_thread_local_launch_success_count",
4038 244 : "Number of times where thread_local_system creation spanned multiple executor threads",
4039 244 : )
4040 244 : .unwrap()
4041 244 : });
4042 :
4043 0 : pub(crate) static THREAD_LOCAL_LAUNCH_FAILURES: Lazy<metrics::IntCounter> = Lazy::new(|| {
4044 0 : register_int_counter!(
4045 0 : "pageserver_tokio_epoll_uring_pageserver_thread_local_launch_failures_count",
4046 0 : "Number of times thread_local_system creation failed and was retried after back-off.",
4047 0 : )
4048 0 : .unwrap()
4049 0 : });
4050 : }
4051 :
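: /// Counter that feeds two time series at once: a cheap process-global one and a
: /// per-tenant-timeline one. `inc`/`inc_by` bump both, so global dashboards and
: /// per-tenant breakdowns stay consistent.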
4052 : pub(crate) struct GlobalAndPerTenantIntCounter {
4053 : global: IntCounter,
4054 : per_tenant: IntCounter,
4055 : }
4056 :
4057 : impl GlobalAndPerTenantIntCounter {
4058 : #[inline(always)]
4059 0 : pub(crate) fn inc(&self) {
4060 0 : self.inc_by(1)
4061 0 : }
4062 : #[inline(always)]
4063 449777 : pub(crate) fn inc_by(&self, n: u64) {
4064 449777 : self.global.inc_by(n);
4065 449777 : self.per_tenant.inc_by(n);
4066 449777 : }
4067 : }
4068 :
4069 : pub(crate) mod tenant_throttling {
4070 : use metrics::register_int_counter_vec;
4071 : use once_cell::sync::Lazy;
4072 : use utils::shard::TenantShardId;
4073 :
4074 : use super::GlobalAndPerTenantIntCounter;
4075 :
4076 : pub(crate) struct Metrics<const KIND: usize> {
4077 : pub(super) count_accounted_start: GlobalAndPerTenantIntCounter,
4078 : pub(super) count_accounted_finish: GlobalAndPerTenantIntCounter,
4079 : pub(super) wait_time: GlobalAndPerTenantIntCounter,
4080 : pub(super) count_throttled: GlobalAndPerTenantIntCounter,
4081 : }
4082 :
4083 428 : static COUNT_ACCOUNTED_START: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
4084 428 : register_int_counter_vec!(
4085 428 : "pageserver_tenant_throttling_count_accounted_start_global",
4086 428 : "Count of tenant throttling starts, by kind of throttle.",
4087 428 : &["kind"]
4088 428 : )
4089 428 : .unwrap()
4090 428 : });
4091 428 : static COUNT_ACCOUNTED_START_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
4092 428 : register_int_counter_vec!(
4093 428 : "pageserver_tenant_throttling_count_accounted_start",
4094 428 : "Count of tenant throttling starts, by kind of throttle.",
4095 428 : &["kind", "tenant_id", "shard_id"]
4096 428 : )
4097 428 : .unwrap()
4098 428 : });
4099 428 : static COUNT_ACCOUNTED_FINISH: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
4100 428 : register_int_counter_vec!(
4101 428 : "pageserver_tenant_throttling_count_accounted_finish_global",
4102 428 : "Count of tenant throttling finishes, by kind of throttle.",
4103 428 : &["kind"]
4104 428 : )
4105 428 : .unwrap()
4106 428 : });
4107 428 : static COUNT_ACCOUNTED_FINISH_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
4108 428 : register_int_counter_vec!(
4109 428 : "pageserver_tenant_throttling_count_accounted_finish",
4110 428 : "Count of tenant throttling finishes, by kind of throttle.",
4111 428 : &["kind", "tenant_id", "shard_id"]
4112 428 : )
4113 428 : .unwrap()
4114 428 : });
4115 428 : static WAIT_USECS: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
4116 428 : register_int_counter_vec!(
4117 428 : "pageserver_tenant_throttling_wait_usecs_sum_global",
4118 428 : "Sum of microseconds that spent waiting throttle by kind of throttle.",
4119 428 : &["kind"]
4120 428 : )
4121 428 : .unwrap()
4122 428 : });
4123 428 : static WAIT_USECS_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
4124 428 : register_int_counter_vec!(
4125 428 : "pageserver_tenant_throttling_wait_usecs_sum",
4126 428 : "Sum of microseconds that spent waiting throttle by kind of throttle.",
4127 428 : &["kind", "tenant_id", "shard_id"]
4128 428 : )
4129 428 : .unwrap()
4130 428 : });
4131 :
4132 428 : static WAIT_COUNT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
4133 428 : register_int_counter_vec!(
4134 428 : "pageserver_tenant_throttling_count_global",
4135 428 : "Count of tenant throttlings, by kind of throttle.",
4136 428 : &["kind"]
4137 428 : )
4138 428 : .unwrap()
4139 428 : });
4140 428 : static WAIT_COUNT_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
4141 428 : register_int_counter_vec!(
4142 428 : "pageserver_tenant_throttling_count",
4143 428 : "Count of tenant throttlings, by kind of throttle.",
4144 428 : &["kind", "tenant_id", "shard_id"]
4145 428 : )
4146 428 : .unwrap()
4147 428 : });
4148 :
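: // The throttle kind is selected via a const-generic index into `KINDS`; e.g. the
: // `Pagestream` alias below is `Metrics<0>`, i.e. the "pagestream" label value.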
4149 : const KINDS: &[&str] = &["pagestream"];
4150 : pub type Pagestream = Metrics<0>;
4151 :
4152 : impl<const KIND: usize> Metrics<KIND> {
4153 464 : pub(crate) fn new(tenant_shard_id: &TenantShardId) -> Self {
4154 464 : let per_tenant_label_values = &[
4155 464 : KINDS[KIND],
4156 464 : &tenant_shard_id.tenant_id.to_string(),
4157 464 : &tenant_shard_id.shard_slug().to_string(),
4158 464 : ];
4159 464 : Metrics {
4160 464 : count_accounted_start: {
4161 464 : GlobalAndPerTenantIntCounter {
4162 464 : global: COUNT_ACCOUNTED_START.with_label_values(&[KINDS[KIND]]),
4163 464 : per_tenant: COUNT_ACCOUNTED_START_PER_TENANT
4164 464 : .with_label_values(per_tenant_label_values),
4165 464 : }
4166 464 : },
4167 464 : count_accounted_finish: {
4168 464 : GlobalAndPerTenantIntCounter {
4169 464 : global: COUNT_ACCOUNTED_FINISH.with_label_values(&[KINDS[KIND]]),
4170 464 : per_tenant: COUNT_ACCOUNTED_FINISH_PER_TENANT
4171 464 : .with_label_values(per_tenant_label_values),
4172 464 : }
4173 464 : },
4174 464 : wait_time: {
4175 464 : GlobalAndPerTenantIntCounter {
4176 464 : global: WAIT_USECS.with_label_values(&[KINDS[KIND]]),
4177 464 : per_tenant: WAIT_USECS_PER_TENANT
4178 464 : .with_label_values(per_tenant_label_values),
4179 464 : }
4180 464 : },
4181 464 : count_throttled: {
4182 464 : GlobalAndPerTenantIntCounter {
4183 464 : global: WAIT_COUNT.with_label_values(&[KINDS[KIND]]),
4184 464 : per_tenant: WAIT_COUNT_PER_TENANT
4185 464 : .with_label_values(per_tenant_label_values),
4186 464 : }
4187 464 : },
4188 464 : }
4189 464 : }
4190 : }
4191 :
4192 0 : pub(crate) fn preinitialize_global_metrics() {
4193 0 : Lazy::force(&COUNT_ACCOUNTED_START);
4194 0 : Lazy::force(&COUNT_ACCOUNTED_FINISH);
4195 0 : Lazy::force(&WAIT_USECS);
4196 0 : Lazy::force(&WAIT_COUNT);
4197 0 : }
4198 :
4199 12 : pub(crate) fn remove_tenant_metrics(tenant_shard_id: &TenantShardId) {
4200 48 : for m in &[
4201 12 : &COUNT_ACCOUNTED_START_PER_TENANT,
4202 12 : &COUNT_ACCOUNTED_FINISH_PER_TENANT,
4203 12 : &WAIT_USECS_PER_TENANT,
4204 12 : &WAIT_COUNT_PER_TENANT,
4205 12 : ] {
4206 96 : for kind in KINDS {
4207 48 : let _ = m.remove_label_values(&[
4208 48 : kind,
4209 48 : &tenant_shard_id.tenant_id.to_string(),
4210 48 : &tenant_shard_id.shard_slug().to_string(),
4211 48 : ]);
4212 48 : }
4213 : }
4214 12 : }
4215 : }
4216 :
4217 : pub(crate) mod disk_usage_based_eviction {
4218 : use super::*;
4219 :
4220 : pub(crate) struct Metrics {
4221 : pub(crate) tenant_collection_time: Histogram,
4222 : pub(crate) tenant_layer_count: Histogram,
4223 : pub(crate) layers_collected: IntCounter,
4224 : pub(crate) layers_selected: IntCounter,
4225 : pub(crate) layers_evicted: IntCounter,
4226 : }
4227 :
4228 : impl Default for Metrics {
4229 0 : fn default() -> Self {
4230 0 : let tenant_collection_time = register_histogram!(
4231 0 : "pageserver_disk_usage_based_eviction_tenant_collection_seconds",
4232 0 : "Time spent collecting layers from a tenant -- not normalized by collected layer amount",
4233 0 : vec![0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1.0, 5.0, 10.0]
4234 0 : )
4235 0 : .unwrap();
4236 0 :
4237 0 : let tenant_layer_count = register_histogram!(
4238 0 : "pageserver_disk_usage_based_eviction_tenant_collected_layers",
4239 0 : "Amount of layers gathered from a tenant",
4240 0 : vec![5.0, 50.0, 500.0, 5000.0, 50000.0]
4241 0 : )
4242 0 : .unwrap();
4243 0 :
4244 0 : let layers_collected = register_int_counter!(
4245 0 : "pageserver_disk_usage_based_eviction_collected_layers_total",
4246 0 : "Amount of layers collected"
4247 0 : )
4248 0 : .unwrap();
4249 0 :
4250 0 : let layers_selected = register_int_counter!(
4251 0 : "pageserver_disk_usage_based_eviction_select_layers_total",
4252 0 : "Amount of layers selected"
4253 0 : )
4254 0 : .unwrap();
4255 0 :
4256 0 : let layers_evicted = register_int_counter!(
4257 0 : "pageserver_disk_usage_based_eviction_evicted_layers_total",
4258 0 : "Amount of layers successfully evicted"
4259 0 : )
4260 0 : .unwrap();
4261 0 :
4262 0 : Self {
4263 0 : tenant_collection_time,
4264 0 : tenant_layer_count,
4265 0 : layers_collected,
4266 0 : layers_selected,
4267 0 : layers_evicted,
4268 0 : }
4269 0 : }
4270 : }
4271 :
4272 : pub(crate) static METRICS: Lazy<Metrics> = Lazy::new(Metrics::default);
4273 : }
4274 :
4275 416 : static TOKIO_EXECUTOR_THREAD_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
4276 416 : register_uint_gauge_vec!(
4277 416 : "pageserver_tokio_executor_thread_configured_count",
4278 416 : "Total number of configued tokio executor threads in the process.
4279 416 : The `setup` label denotes whether we're running with multiple runtimes or a single runtime.",
4280 416 : &["setup"],
4281 416 : )
4282 416 : .unwrap()
4283 416 : });
4284 :
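: /// Records which tokio runtime setup is in use. The gauge vec is reset first so that
: /// switching setups (multiple runtimes vs. a single one) never leaves a stale series
: /// behind; the reset+set pair is serialized with a local mutex.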
4285 416 : pub(crate) fn set_tokio_runtime_setup(setup: &str, num_threads: NonZeroUsize) {
4286 : static SERIALIZE: std::sync::Mutex<()> = std::sync::Mutex::new(());
4287 416 : let _guard = SERIALIZE.lock().unwrap();
4288 416 : TOKIO_EXECUTOR_THREAD_COUNT.reset();
4289 416 : TOKIO_EXECUTOR_THREAD_COUNT
4290 416 : .get_metric_with_label_values(&[setup])
4291 416 : .unwrap()
4292 416 : .set(u64::try_from(num_threads.get()).unwrap());
4293 416 : }
4294 :
4295 0 : static PAGESERVER_CONFIG_IGNORED_ITEMS: Lazy<UIntGaugeVec> = Lazy::new(|| {
4296 0 : register_uint_gauge_vec!(
4297 0 : "pageserver_config_ignored_items",
4298 0 : "TOML items present in the on-disk configuration file but ignored by the pageserver config parser.\
4299 0 : The `item` label is the dot-separated path of the ignored item in the on-disk configuration file.\
4300 0 : The value for an unknown config item is always 1.\
4301 0 : There is a special label value \"\", which is 0, so that there is always a metric exposed (simplifies dashboards).",
4302 0 : &["item"]
4303 0 : )
4304 0 : .unwrap()
4305 0 : });
4306 :
4307 0 : pub fn preinitialize_metrics(
4308 0 : conf: &'static PageServerConf,
4309 0 : ignored: config::ignored_fields::Paths,
4310 0 : ) {
4311 0 : set_page_service_config_max_batch_size(&conf.page_service_pipelining);
4312 0 :
4313 0 : PAGESERVER_CONFIG_IGNORED_ITEMS
4314 0 : .with_label_values(&[""])
4315 0 : .set(0);
4316 0 : for path in &ignored.paths {
4317 0 : PAGESERVER_CONFIG_IGNORED_ITEMS
4318 0 : .with_label_values(&[path])
4319 0 : .set(1);
4320 0 : }
4321 :
4322 : // Python tests need these, and we alert on some of them.
4323 : //
4324 : // FIXME(4813): make it so that we have no top level metrics as this fn will easily fall out of
4325 : // order:
4326 : // - global metrics reside in a Lazy<PageserverMetrics>
4327 : // - access via crate::metrics::PS_METRICS.some_metric.inc()
4328 : // - could move the statics into TimelineMetrics::new()?
4329 :
4330 : // counters
4331 0 : [
4332 0 : &UNEXPECTED_ONDEMAND_DOWNLOADS,
4333 0 : &WALRECEIVER_STARTED_CONNECTIONS,
4334 0 : &WALRECEIVER_BROKER_UPDATES,
4335 0 : &WALRECEIVER_CANDIDATES_ADDED,
4336 0 : &WALRECEIVER_CANDIDATES_REMOVED,
4337 0 : &tokio_epoll_uring::THREAD_LOCAL_LAUNCH_FAILURES,
4338 0 : &tokio_epoll_uring::THREAD_LOCAL_LAUNCH_SUCCESSES,
4339 0 : &REMOTE_ONDEMAND_DOWNLOADED_LAYERS,
4340 0 : &REMOTE_ONDEMAND_DOWNLOADED_BYTES,
4341 0 : &CIRCUIT_BREAKERS_BROKEN,
4342 0 : &CIRCUIT_BREAKERS_UNBROKEN,
4343 0 : &PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS_GLOBAL,
4344 0 : &WAIT_LSN_IN_PROGRESS_GLOBAL_MICROS,
4345 0 : ]
4346 0 : .into_iter()
4347 0 : .for_each(|c| {
4348 0 : Lazy::force(c);
4349 0 : });
4350 0 :
4351 0 : // Deletion queue stats
4352 0 : Lazy::force(&DELETION_QUEUE);
4353 0 :
4354 0 : // Tenant stats
4355 0 : Lazy::force(&TENANT);
4356 0 :
4357 0 : // Tenant manager stats
4358 0 : Lazy::force(&TENANT_MANAGER);
4359 0 :
4360 0 : Lazy::force(&crate::tenant::storage_layer::layer::LAYER_IMPL_METRICS);
4361 0 : Lazy::force(&disk_usage_based_eviction::METRICS);
4362 :
4363 0 : for state_name in pageserver_api::models::TenantState::VARIANTS {
4364 0 : // initialize the metric for all states; otherwise the time series might appear to show
4365 0 : // stale values from before the last restart.
4366 0 : TENANT_STATE_METRIC.with_label_values(&[state_name]).set(0);
4367 0 : }
4368 :
4369 : // countervecs
4370 0 : [
4371 0 : &BACKGROUND_LOOP_PERIOD_OVERRUN_COUNT,
4372 0 : &SMGR_QUERY_STARTED_GLOBAL,
4373 0 : &PAGE_SERVICE_BATCH_BREAK_REASON_GLOBAL,
4374 0 : ]
4375 0 : .into_iter()
4376 0 : .for_each(|c| {
4377 0 : Lazy::force(c);
4378 0 : });
4379 0 :
4380 0 : // gauges
4381 0 : WALRECEIVER_ACTIVE_MANAGERS.get();
4382 0 :
4383 0 : // histograms
4384 0 : [
4385 0 : &LAYERS_PER_READ_GLOBAL,
4386 0 : &LAYERS_PER_READ_BATCH_GLOBAL,
4387 0 : &LAYERS_PER_READ_AMORTIZED_GLOBAL,
4388 0 : &DELTAS_PER_READ_GLOBAL,
4389 0 : &WAIT_LSN_TIME,
4390 0 : &WAL_REDO_TIME,
4391 0 : &WAL_REDO_RECORDS_HISTOGRAM,
4392 0 : &WAL_REDO_BYTES_HISTOGRAM,
4393 0 : &WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM,
4394 0 : &PAGE_SERVICE_BATCH_SIZE_GLOBAL,
4395 0 : &PAGE_SERVICE_SMGR_BATCH_WAIT_TIME_GLOBAL,
4396 0 : ]
4397 0 : .into_iter()
4398 0 : .for_each(|h| {
4399 0 : Lazy::force(h);
4400 0 : });
4401 0 :
4402 0 : // Custom
4403 0 : Lazy::force(&BASEBACKUP_QUERY_TIME);
4404 0 : Lazy::force(&COMPUTE_COMMANDS_COUNTERS);
4405 0 : Lazy::force(&tokio_epoll_uring::THREAD_LOCAL_METRICS_STORAGE);
4406 0 :
4407 0 : tenant_throttling::preinitialize_global_metrics();
4408 0 : wait_ondemand_download_time::preinitialize_global_metrics();
4409 0 : }
|