Line data Source code
1 : use std::collections::HashMap;
2 : use std::num::NonZeroUsize;
3 : use std::os::fd::RawFd;
4 : use std::sync::atomic::AtomicU64;
5 : use std::sync::{Arc, Mutex};
6 : use std::time::{Duration, Instant};
7 :
8 : use enum_map::{Enum as _, EnumMap};
9 : use futures::Future;
10 : use metrics::{
11 : Counter, CounterVec, GaugeVec, Histogram, HistogramVec, IntCounter, IntCounterPair,
12 : IntCounterPairVec, IntCounterVec, IntGauge, IntGaugeVec, UIntGauge, UIntGaugeVec,
13 : register_counter_vec, register_gauge_vec, register_histogram, register_histogram_vec,
14 : register_int_counter, register_int_counter_pair_vec, register_int_counter_vec,
15 : register_int_gauge, register_int_gauge_vec, register_uint_gauge, register_uint_gauge_vec,
16 : };
17 : use once_cell::sync::Lazy;
18 : use pageserver_api::config::{
19 : PageServicePipeliningConfig, PageServicePipeliningConfigPipelined,
20 : PageServiceProtocolPipelinedBatchingStrategy, PageServiceProtocolPipelinedExecutionStrategy,
21 : };
22 : use pageserver_api::models::InMemoryLayerInfo;
23 : use pageserver_api::shard::TenantShardId;
24 : use postgres_backend::{QueryError, is_expected_io_error};
25 : use pq_proto::framed::ConnectionError;
26 : use strum::{EnumCount, IntoEnumIterator as _, VariantNames};
27 : use strum_macros::{IntoStaticStr, VariantNames};
28 : use utils::id::TimelineId;
29 :
30 : use crate::config;
31 : use crate::config::PageServerConf;
32 : use crate::context::{PageContentKind, RequestContext};
33 : use crate::pgdatadir_mapping::DatadirModificationStats;
34 : use crate::task_mgr::TaskKind;
35 : use crate::tenant::Timeline;
36 : use crate::tenant::layer_map::LayerMap;
37 : use crate::tenant::mgr::TenantSlot;
38 : use crate::tenant::storage_layer::{InMemoryLayer, PersistentLayerDesc};
39 : use crate::tenant::tasks::BackgroundLoopKind;
40 : use crate::tenant::throttle::ThrottleResult;
41 :
42 : /// Prometheus histogram buckets (in seconds) for operations in the critical
43 : /// path. In other words, operations that directly affect the latency of user
44 : /// queries.
45 : ///
46 : /// The buckets capture the majority of latencies in the microsecond and
47 : /// millisecond range but also extend far enough up to distinguish "bad" from
48 : /// "really bad".
49 : const CRITICAL_OP_BUCKETS: &[f64] = &[
50 : 0.000_001, 0.000_010, 0.000_100, // 1 us, 10 us, 100 us
51 : 0.001_000, 0.010_000, 0.100_000, // 1 ms, 10 ms, 100 ms
52 : 1.0, 10.0, 100.0, // 1 s, 10 s, 100 s
53 : ];
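// Worked example (editorial note, not shipped code): Prometheus buckets are
// cumulative upper bounds, so an observation of 250 us (0.000_250 s) falls
// into the `le="0.001"` bucket and every larger one, while a 2.5 s
// observation first lands in `le="10"`. A histogram in this file would use
// the buckets like so, with a hypothetical metric name:
//
// ```
// static MY_CRITICAL_OP: Lazy<Histogram> = Lazy::new(|| {
//     register_histogram!(
//         "pageserver_my_critical_op_seconds", // illustrative name only
//         "Time spent in a hypothetical critical-path operation",
//         CRITICAL_OP_BUCKETS.into(),
//     )
//     .expect("failed to define a metric")
// });
// ```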
54 :
55 : // Metrics collected on operations on the storage repository.
56 : #[derive(Debug, VariantNames, IntoStaticStr)]
57 : #[strum(serialize_all = "kebab_case")]
58 : pub(crate) enum StorageTimeOperation {
59 : #[strum(serialize = "layer flush")]
60 : LayerFlush,
61 :
62 : #[strum(serialize = "layer flush delay")]
63 : LayerFlushDelay,
64 :
65 : #[strum(serialize = "compact")]
66 : Compact,
67 :
68 : #[strum(serialize = "create images")]
69 : CreateImages,
70 :
71 : #[strum(serialize = "logical size")]
72 : LogicalSize,
73 :
74 : #[strum(serialize = "imitate logical size")]
75 : ImitateLogicalSize,
76 :
77 : #[strum(serialize = "load layer map")]
78 : LoadLayerMap,
79 :
80 : #[strum(serialize = "gc")]
81 : Gc,
82 :
83 : #[strum(serialize = "find gc cutoffs")]
84 : FindGcCutoffs,
85 : }
86 :
87 1284 : pub(crate) static STORAGE_TIME_SUM_PER_TIMELINE: Lazy<CounterVec> = Lazy::new(|| {
88 1284 : register_counter_vec!(
89 1284 : "pageserver_storage_operations_seconds_sum",
90 1284 : "Total time spent on storage operations with operation, tenant and timeline dimensions",
91 1284 : &["operation", "tenant_id", "shard_id", "timeline_id"],
92 1284 : )
93 1284 : .expect("failed to define a metric")
94 1284 : });
95 :
96 1284 : pub(crate) static STORAGE_TIME_COUNT_PER_TIMELINE: Lazy<IntCounterVec> = Lazy::new(|| {
97 1284 : register_int_counter_vec!(
98 1284 : "pageserver_storage_operations_seconds_count",
99 1284 : "Count of storage operations with operation, tenant and timeline dimensions",
100 1284 : &["operation", "tenant_id", "shard_id", "timeline_id"],
101 1284 : )
102 1284 : .expect("failed to define a metric")
103 1284 : });
104 :
105 : // Buckets for background operation duration in seconds, like compaction, GC, size calculation.
106 : const STORAGE_OP_BUCKETS: &[f64] = &[0.010, 0.100, 1.0, 10.0, 100.0, 1000.0];
107 :
108 1284 : pub(crate) static STORAGE_TIME_GLOBAL: Lazy<HistogramVec> = Lazy::new(|| {
109 1284 : register_histogram_vec!(
110 1284 : "pageserver_storage_operations_seconds_global",
111 1284 : "Time spent on storage operations",
112 1284 : &["operation"],
113 1284 : STORAGE_OP_BUCKETS.into(),
114 1284 : )
115 1284 : .expect("failed to define a metric")
116 1284 : });
117 :
118 : /// Measures layers visited per read (i.e. read amplification).
119 : ///
120 : /// NB: for a batch, we count all visited layers towards each read. While the cost of layer visits
121 : /// is amortized across the batch, and some layers may not intersect with a given key, each visited
122 : /// layer contributes directly to the observed latency for every read in the batch, which is what we
123 : /// care about.
124 1284 : pub(crate) static LAYERS_PER_READ: Lazy<HistogramVec> = Lazy::new(|| {
125 1284 : register_histogram_vec!(
126 1284 : "pageserver_layers_per_read",
127 1284 : "Layers visited to serve a single read (read amplification). In a batch, all visited layers count towards every read.",
128 1284 : &["tenant_id", "shard_id", "timeline_id"],
129 1284 : // Low resolution to reduce cardinality.
130 1284 : vec![4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0],
131 1284 : )
132 1284 : .expect("failed to define a metric")
133 1284 : });
134 :
135 1260 : pub(crate) static LAYERS_PER_READ_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
136 1260 : register_histogram!(
137 1260 : "pageserver_layers_per_read_global",
138 1260 : "Layers visited to serve a single read (read amplification). In a batch, all visited layers count towards every read.",
139 1260 : vec![1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0],
140 1260 : )
141 1260 : .expect("failed to define a metric")
142 1260 : });
143 :
144 1260 : pub(crate) static LAYERS_PER_READ_BATCH_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
145 1260 : register_histogram!(
146 1260 : "pageserver_layers_per_read_batch_global",
147 1260 : "Layers visited to serve a single read batch (read amplification), regardless of number of reads.",
148 1260 : vec![
149 1260 : 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0
150 1260 : ],
151 1260 : )
152 1260 : .expect("failed to define a metric")
153 1260 : });
154 :
155 1260 : pub(crate) static LAYERS_PER_READ_AMORTIZED_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
156 1260 : register_histogram!(
157 1260 : "pageserver_layers_per_read_amortized_global",
158 1260 : "Layers visited to serve a single read (read amplification). Amortized across a batch: \
159 1260 : all visited layers are divided by number of reads.",
160 1260 : vec![
161 1260 : 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0
162 1260 : ],
163 1260 : )
164 1260 : .expect("failed to define a metric")
165 1260 : });
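// Worked example for the three views above (editorial note): if a batch of 4
// reads visits 10 layers in total, then `pageserver_layers_per_read` and its
// `_global` variant observe 10 for each of the 4 reads, `_batch_global`
// observes 10 once for the whole batch, and `_amortized_global` records
// 10 / 4 = 2.5 per read, per its help string.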
166 :
167 1260 : pub(crate) static DELTAS_PER_READ_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
168 1260 : // We expect this to be low because of Postgres checkpoints. Let's see if that holds.
169 1260 : register_histogram!(
170 1260 : "pageserver_deltas_per_read_global",
171 1260 : "Number of delta pages applied to image page per read",
172 1260 : vec![0.0, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0],
173 1260 : )
174 1260 : .expect("failed to define a metric")
175 1260 : });
176 :
177 0 : pub(crate) static CONCURRENT_INITDBS: Lazy<UIntGauge> = Lazy::new(|| {
178 0 : register_uint_gauge!(
179 0 : "pageserver_concurrent_initdb",
180 0 : "Number of initdb processes running"
181 0 : )
182 0 : .expect("failed to define a metric")
183 0 : });
184 :
185 0 : pub(crate) static INITDB_SEMAPHORE_ACQUISITION_TIME: Lazy<Histogram> = Lazy::new(|| {
186 0 : register_histogram!(
187 0 : "pageserver_initdb_semaphore_seconds_global",
188 0 : "Time spent getting a permit from the global initdb semaphore",
189 0 : STORAGE_OP_BUCKETS.into()
190 0 : )
191 0 : .expect("failed to define metric")
192 0 : });
193 :
194 0 : pub(crate) static INITDB_RUN_TIME: Lazy<Histogram> = Lazy::new(|| {
195 0 : register_histogram!(
196 0 : "pageserver_initdb_seconds_global",
197 0 : "Time spent performing initdb",
198 0 : STORAGE_OP_BUCKETS.into()
199 0 : )
200 0 : .expect("failed to define metric")
201 0 : });
202 :
203 : pub(crate) struct GetVectoredLatency {
204 : map: EnumMap<TaskKind, Option<Histogram>>,
205 : }
206 :
207 : #[allow(dead_code)]
208 : pub(crate) struct ScanLatency {
209 : map: EnumMap<TaskKind, Option<Histogram>>,
210 : }
211 :
212 : impl GetVectoredLatency {
213 : // Only these task types perform vectored gets. Filter all other tasks out to reduce total
214 : // cardinality of the metric.
215 : const TRACKED_TASK_KINDS: [TaskKind; 2] = [TaskKind::Compaction, TaskKind::PageRequestHandler];
216 :
217 130584 : pub(crate) fn for_task_kind(&self, task_kind: TaskKind) -> Option<&Histogram> {
218 130584 : self.map[task_kind].as_ref()
219 130584 : }
220 : }
221 :
222 : impl ScanLatency {
223 : // Only these task types perform scans. Filter all other tasks out to reduce total
224 : // cardinality of the metric.
225 : const TRACKED_TASK_KINDS: [TaskKind; 1] = [TaskKind::PageRequestHandler];
226 :
227 96 : pub(crate) fn for_task_kind(&self, task_kind: TaskKind) -> Option<&Histogram> {
228 96 : self.map[task_kind].as_ref()
229 96 : }
230 : }
231 :
232 : pub(crate) struct ScanLatencyOngoingRecording<'a> {
233 : parent: &'a Histogram,
234 : start: std::time::Instant,
235 : }
236 :
237 : impl<'a> ScanLatencyOngoingRecording<'a> {
238 0 : pub(crate) fn start_recording(parent: &'a Histogram) -> ScanLatencyOngoingRecording<'a> {
239 0 : let start = Instant::now();
240 0 : ScanLatencyOngoingRecording { parent, start }
241 0 : }
242 :
243 0 : pub(crate) fn observe(self) {
244 0 : let elapsed = self.start.elapsed();
245 0 : self.parent.observe(elapsed.as_secs_f64());
246 0 : }
247 : }
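// Minimal usage sketch for the recording guard above (hypothetical call site;
// `ctx` is assumed to be a `RequestContext`):
//
// ```
// if let Some(histo) = SCAN_LATENCY.for_task_kind(ctx.task_kind()) {
//     let recording = ScanLatencyOngoingRecording::start_recording(histo);
//     // ... perform the scan ...
//     recording.observe();
// }
// ```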
248 :
249 1236 : pub(crate) static GET_VECTORED_LATENCY: Lazy<GetVectoredLatency> = Lazy::new(|| {
250 1236 : let inner = register_histogram_vec!(
251 1236 : "pageserver_get_vectored_seconds",
252 1236 : "Time spent in get_vectored.",
253 1236 : &["task_kind"],
254 1236 : CRITICAL_OP_BUCKETS.into(),
255 1236 : )
256 1236 : .expect("failed to define a metric");
257 1236 :
258 1236 : GetVectoredLatency {
259 38316 : map: EnumMap::from_array(std::array::from_fn(|task_kind_idx| {
260 38316 : let task_kind = TaskKind::from_usize(task_kind_idx);
261 38316 :
262 38316 : if GetVectoredLatency::TRACKED_TASK_KINDS.contains(&task_kind) {
263 2472 : let task_kind = task_kind.into();
264 2472 : Some(inner.with_label_values(&[task_kind]))
265 : } else {
266 35844 : None
267 : }
268 38316 : })),
269 1236 : }
270 1236 : });
271 :
272 36 : pub(crate) static SCAN_LATENCY: Lazy<ScanLatency> = Lazy::new(|| {
273 36 : let inner = register_histogram_vec!(
274 36 : "pageserver_scan_seconds",
275 36 : "Time spent in scan.",
276 36 : &["task_kind"],
277 36 : CRITICAL_OP_BUCKETS.into(),
278 36 : )
279 36 : .expect("failed to define a metric");
280 36 :
281 36 : ScanLatency {
282 1116 : map: EnumMap::from_array(std::array::from_fn(|task_kind_idx| {
283 1116 : let task_kind = TaskKind::from_usize(task_kind_idx);
284 1116 :
285 1116 : if ScanLatency::TRACKED_TASK_KINDS.contains(&task_kind) {
286 36 : let task_kind = task_kind.into();
287 36 : Some(inner.with_label_values(&[task_kind]))
288 : } else {
289 1080 : None
290 : }
291 1116 : })),
292 36 : }
293 36 : });
294 :
295 : pub(crate) struct PageCacheMetricsForTaskKind {
296 : pub read_accesses_immutable: IntCounter,
297 : pub read_hits_immutable: IntCounter,
298 : }
299 :
300 : pub(crate) struct PageCacheMetrics {
301 : map: EnumMap<TaskKind, EnumMap<PageContentKind, PageCacheMetricsForTaskKind>>,
302 : }
303 :
304 600 : static PAGE_CACHE_READ_HITS: Lazy<IntCounterVec> = Lazy::new(|| {
305 600 : register_int_counter_vec!(
306 600 : "pageserver_page_cache_read_hits_total",
307 600 : "Number of read accesses to the page cache that hit",
308 600 : &["task_kind", "key_kind", "content_kind", "hit_kind"]
309 600 : )
310 600 : .expect("failed to define a metric")
311 600 : });
312 :
313 600 : static PAGE_CACHE_READ_ACCESSES: Lazy<IntCounterVec> = Lazy::new(|| {
314 600 : register_int_counter_vec!(
315 600 : "pageserver_page_cache_read_accesses_total",
316 600 : "Number of read accesses to the page cache",
317 600 : &["task_kind", "key_kind", "content_kind"]
318 600 : )
319 600 : .expect("failed to define a metric")
320 600 : });
321 :
322 600 : pub(crate) static PAGE_CACHE: Lazy<PageCacheMetrics> = Lazy::new(|| PageCacheMetrics {
323 18600 : map: EnumMap::from_array(std::array::from_fn(|task_kind| {
324 18600 : let task_kind = TaskKind::from_usize(task_kind);
325 18600 : let task_kind: &'static str = task_kind.into();
326 148800 : EnumMap::from_array(std::array::from_fn(|content_kind| {
327 148800 : let content_kind = PageContentKind::from_usize(content_kind);
328 148800 : let content_kind: &'static str = content_kind.into();
329 148800 : PageCacheMetricsForTaskKind {
330 148800 : read_accesses_immutable: {
331 148800 : PAGE_CACHE_READ_ACCESSES
332 148800 : .get_metric_with_label_values(&[task_kind, "immutable", content_kind])
333 148800 : .unwrap()
334 148800 : },
335 148800 :
336 148800 : read_hits_immutable: {
337 148800 : PAGE_CACHE_READ_HITS
338 148800 : .get_metric_with_label_values(&[task_kind, "immutable", content_kind, "-"])
339 148800 : .unwrap()
340 148800 : },
341 148800 : }
342 148800 : }))
343 18600 : })),
344 600 : });
345 :
346 : impl PageCacheMetrics {
347 6757112 : pub(crate) fn for_ctx(&self, ctx: &RequestContext) -> &PageCacheMetricsForTaskKind {
348 6757112 : &self.map[ctx.task_kind()][ctx.page_content_kind()]
349 6757112 : }
350 : }
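// Typical call pattern (sketch; `ctx` and `hit` are hypothetical): bump the
// access counter on every page cache lookup, and the hit counter only when
// the lookup succeeds.
//
// ```
// let m = PAGE_CACHE.for_ctx(ctx);
// m.read_accesses_immutable.inc();
// if hit {
//     m.read_hits_immutable.inc();
// }
// ```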
351 :
352 : pub(crate) struct PageCacheSizeMetrics {
353 : pub max_bytes: UIntGauge,
354 :
355 : pub current_bytes_immutable: UIntGauge,
356 : }
357 :
358 600 : static PAGE_CACHE_SIZE_CURRENT_BYTES: Lazy<UIntGaugeVec> = Lazy::new(|| {
359 600 : register_uint_gauge_vec!(
360 600 : "pageserver_page_cache_size_current_bytes",
361 600 : "Current size of the page cache in bytes, by key kind",
362 600 : &["key_kind"]
363 600 : )
364 600 : .expect("failed to define a metric")
365 600 : });
366 :
367 : pub(crate) static PAGE_CACHE_SIZE: Lazy<PageCacheSizeMetrics> =
368 600 : Lazy::new(|| PageCacheSizeMetrics {
369 600 : max_bytes: {
370 600 : register_uint_gauge!(
371 600 : "pageserver_page_cache_size_max_bytes",
372 600 : "Maximum size of the page cache in bytes"
373 600 : )
374 600 : .expect("failed to define a metric")
375 600 : },
376 600 : current_bytes_immutable: {
377 600 : PAGE_CACHE_SIZE_CURRENT_BYTES
378 600 : .get_metric_with_label_values(&["immutable"])
379 600 : .unwrap()
380 600 : },
381 600 : });
382 :
383 : pub(crate) mod page_cache_eviction_metrics {
384 : use std::num::NonZeroUsize;
385 :
386 : use metrics::{IntCounter, IntCounterVec, register_int_counter_vec};
387 : use once_cell::sync::Lazy;
388 :
389 : #[derive(Clone, Copy)]
390 : pub(crate) enum Outcome {
391 : FoundSlotUnused { iters: NonZeroUsize },
392 : FoundSlotEvicted { iters: NonZeroUsize },
393 : ItersExceeded { iters: NonZeroUsize },
394 : }
395 :
396 600 : static ITERS_TOTAL_VEC: Lazy<IntCounterVec> = Lazy::new(|| {
397 600 : register_int_counter_vec!(
398 600 : "pageserver_page_cache_find_victim_iters_total",
399 600 : "Counter for the number of iterations in the find_victim loop",
400 600 : &["outcome"],
401 600 : )
402 600 : .expect("failed to define a metric")
403 600 : });
404 :
405 600 : static CALLS_VEC: Lazy<IntCounterVec> = Lazy::new(|| {
406 600 : register_int_counter_vec!(
407 600 : "pageserver_page_cache_find_victim_calls",
408 600 : "Incremented at the end of each find_victim() call.\
409 600 : Filter by outcome to get e.g., eviction rate.",
410 600 : &["outcome"]
411 600 : )
412 600 : .unwrap()
413 600 : });
414 :
415 185691 : pub(crate) fn observe(outcome: Outcome) {
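// The `dry!` macro caches the `with_label_values` lookup in per-label
// `static`s, so the hot path of `observe` is a plain atomic increment
// rather than a label-map lookup on every call.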
416 : macro_rules! dry {
417 : ($label:literal, $iters:expr) => {{
418 : static LABEL: &'static str = $label;
419 : static ITERS_TOTAL: Lazy<IntCounter> =
420 708 : Lazy::new(|| ITERS_TOTAL_VEC.with_label_values(&[LABEL]));
421 : static CALLS: Lazy<IntCounter> =
422 708 : Lazy::new(|| CALLS_VEC.with_label_values(&[LABEL]));
423 : ITERS_TOTAL.inc_by(($iters.get()) as u64);
424 : CALLS.inc();
425 : }};
426 : }
427 185691 : match outcome {
428 9840 : Outcome::FoundSlotUnused { iters } => dry!("found_empty", iters),
429 175851 : Outcome::FoundSlotEvicted { iters } => {
430 175851 : dry!("found_evicted", iters)
431 : }
432 0 : Outcome::ItersExceeded { iters } => {
433 0 : dry!("err_iters_exceeded", iters);
434 0 : super::page_cache_errors_inc(super::PageCacheErrorKind::EvictIterLimit);
435 0 : }
436 : }
437 185691 : }
438 : }
439 :
440 0 : static PAGE_CACHE_ERRORS: Lazy<IntCounterVec> = Lazy::new(|| {
441 0 : register_int_counter_vec!(
442 0 : "page_cache_errors_total",
443 0 : "Number of timeouts while acquiring a pinned slot in the page cache",
444 0 : &["error_kind"]
445 0 : )
446 0 : .expect("failed to define a metric")
447 0 : });
448 :
449 : #[derive(IntoStaticStr)]
450 : #[strum(serialize_all = "kebab_case")]
451 : pub(crate) enum PageCacheErrorKind {
452 : AcquirePinnedSlotTimeout,
453 : EvictIterLimit,
454 : }
455 :
456 0 : pub(crate) fn page_cache_errors_inc(error_kind: PageCacheErrorKind) {
457 0 : PAGE_CACHE_ERRORS
458 0 : .get_metric_with_label_values(&[error_kind.into()])
459 0 : .unwrap()
460 0 : .inc();
461 0 : }
462 :
463 132 : pub(crate) static WAIT_LSN_TIME: Lazy<Histogram> = Lazy::new(|| {
464 132 : register_histogram!(
465 132 : "pageserver_wait_lsn_seconds",
466 132 : "Time spent waiting for WAL to arrive. Updated on completion of the wait_lsn operation.",
467 132 : CRITICAL_OP_BUCKETS.into(),
468 132 : )
469 132 : .expect("failed to define a metric")
470 132 : });
471 :
472 1284 : pub(crate) static WAIT_LSN_START_FINISH_COUNTERPAIR: Lazy<IntCounterPairVec> = Lazy::new(|| {
473 1284 : register_int_counter_pair_vec!(
474 1284 : "pageserver_wait_lsn_started_count",
475 1284 : "Number of wait_lsn operations started.",
476 1284 : "pageserver_wait_lsn_finished_count",
477 1284 : "Number of wait_lsn operations finished.",
478 1284 : &["tenant_id", "shard_id", "timeline_id"],
479 1284 : )
480 1284 : .expect("failed to define a metric")
481 1284 : });
482 :
483 1284 : pub(crate) static WAIT_LSN_IN_PROGRESS_MICROS: Lazy<IntCounterVec> = Lazy::new(|| {
484 1284 : register_int_counter_vec!(
485 1284 : "pageserver_wait_lsn_in_progress_micros",
486 1284 : "Time spent waiting for WAL to arrive, by timeline_id. Updated periodically while waiting.",
487 1284 : &["tenant_id", "shard_id", "timeline_id"],
488 1284 : )
489 1284 : .expect("failed to define a metric")
490 1284 : });
491 :
492 1284 : pub(crate) static WAIT_LSN_IN_PROGRESS_GLOBAL_MICROS: Lazy<IntCounter> = Lazy::new(|| {
493 1284 : register_int_counter!(
494 1284 : "pageserver_wait_lsn_in_progress_micros_global",
495 1284 : "Time spent waiting for WAL to arrive, globally. Updated periodically while waiting."
496 1284 : )
497 1284 : .expect("failed to define a metric")
498 1284 : });
499 :
500 36 : pub(crate) static ONDEMAND_DOWNLOAD_BYTES: Lazy<IntCounterVec> = Lazy::new(|| {
501 36 : register_int_counter_vec!(
502 36 : "pageserver_ondemand_download_bytes_total",
503 36 : "Total bytes of layers on-demand downloaded",
504 36 : &["task_kind"]
505 36 : )
506 36 : .expect("failed to define a metric")
507 36 : });
508 :
509 36 : pub(crate) static ONDEMAND_DOWNLOAD_COUNT: Lazy<IntCounterVec> = Lazy::new(|| {
510 36 : register_int_counter_vec!(
511 36 : "pageserver_ondemand_download_count",
512 36 : "Total count of layers on-demand downloaded",
513 36 : &["task_kind"]
514 36 : )
515 36 : .expect("failed to define a metric")
516 36 : });
517 :
518 : pub(crate) mod wait_ondemand_download_time {
519 : use super::*;
520 : const WAIT_ONDEMAND_DOWNLOAD_TIME_BUCKETS: &[f64] = &[
521 : 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, // 10 ms - 100ms
522 : 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, // 100ms to 1s
523 : 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, // 1s to 10s
524 : 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, // 10s to 1m
525 : ];
526 :
527 : /// The task kinds for which we want to track wait times for on-demand downloads.
528 : /// Other task kinds' wait times are accumulated in label value `unknown`.
529 : pub(crate) const WAIT_ONDEMAND_DOWNLOAD_METRIC_TASK_KINDS: [TaskKind; 2] = [
530 : TaskKind::PageRequestHandler,
531 : TaskKind::WalReceiverConnectionHandler,
532 : ];
533 :
534 0 : pub(crate) static WAIT_ONDEMAND_DOWNLOAD_TIME_GLOBAL: Lazy<Vec<Histogram>> = Lazy::new(|| {
535 0 : let histo = register_histogram_vec!(
536 0 : "pageserver_wait_ondemand_download_seconds_global",
537 0 : "Observations are individual tasks' wait times for on-demand downloads. \
538 0 : If N tasks coalesce on an on-demand download, and it takes 10s, then we observe N * 10s.",
539 0 : &["task_kind"],
540 0 : WAIT_ONDEMAND_DOWNLOAD_TIME_BUCKETS.into(),
541 0 : )
542 0 : .expect("failed to define a metric");
543 0 : WAIT_ONDEMAND_DOWNLOAD_METRIC_TASK_KINDS
544 0 : .iter()
545 0 : .map(|task_kind| histo.with_label_values(&[task_kind.into()]))
546 0 : .collect::<Vec<_>>()
547 0 : });
548 :
549 1284 : pub(crate) static WAIT_ONDEMAND_DOWNLOAD_TIME_SUM: Lazy<CounterVec> = Lazy::new(|| {
550 1284 : register_counter_vec!(
551 1284 : // use a name that _could_ be evolved into a per-timeline histogram later
552 1284 : "pageserver_wait_ondemand_download_seconds_sum",
553 1284 : "Like `pageserver_wait_ondemand_download_seconds_global` but per timeline",
554 1284 : &["tenant_id", "shard_id", "timeline_id", "task_kind"],
555 1284 : )
556 1284 : .unwrap()
557 1284 : });
558 :
559 : pub struct WaitOndemandDownloadTimeSum {
560 : counters: [Counter; WAIT_ONDEMAND_DOWNLOAD_METRIC_TASK_KINDS.len()],
561 : }
562 :
563 : impl WaitOndemandDownloadTimeSum {
564 2796 : pub(crate) fn new(tenant_id: &str, shard_id: &str, timeline_id: &str) -> Self {
565 2796 : let counters = WAIT_ONDEMAND_DOWNLOAD_METRIC_TASK_KINDS
566 2796 : .iter()
567 5592 : .map(|task_kind| {
568 5592 : WAIT_ONDEMAND_DOWNLOAD_TIME_SUM
569 5592 : .get_metric_with_label_values(&[
570 5592 : tenant_id,
571 5592 : shard_id,
572 5592 : timeline_id,
573 5592 : task_kind.into(),
574 5592 : ])
575 5592 : .unwrap()
576 5592 : })
577 2796 : .collect::<Vec<_>>();
578 2796 : Self {
579 2796 : counters: counters.try_into().unwrap(),
580 2796 : }
581 2796 : }
582 144 : pub(crate) fn observe(&self, task_kind: TaskKind, duration: Duration) {
583 144 : let maybe = WAIT_ONDEMAND_DOWNLOAD_METRIC_TASK_KINDS
584 144 : .iter()
585 144 : .enumerate()
586 288 : .find(|(_, kind)| **kind == task_kind);
587 144 : let Some((idx, _)) = maybe else {
588 144 : return;
589 : };
590 0 : WAIT_ONDEMAND_DOWNLOAD_TIME_GLOBAL[idx].observe(duration.as_secs_f64());
591 0 : let counter = &self.counters[idx];
592 0 : counter.inc_by(duration.as_secs_f64());
593 144 : }
594 : }
595 :
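/// Removes the per-timeline wait-time counters registered above when a
/// timeline shuts down, so the metric does not keep exporting stale series.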
596 60 : pub(crate) fn shutdown_timeline(tenant_id: &str, shard_id: &str, timeline_id: &str) {
597 180 : for task_kind in WAIT_ONDEMAND_DOWNLOAD_METRIC_TASK_KINDS {
598 120 : let _ = WAIT_ONDEMAND_DOWNLOAD_TIME_SUM.remove_label_values(&[
599 120 : tenant_id,
600 120 : shard_id,
601 120 : timeline_id,
602 120 : task_kind.into(),
603 120 : ]);
604 120 : }
605 60 : }
606 :
607 0 : pub(crate) fn preinitialize_global_metrics() {
608 0 : Lazy::force(&WAIT_ONDEMAND_DOWNLOAD_TIME_GLOBAL);
609 0 : }
610 : }
611 :
612 1284 : static LAST_RECORD_LSN: Lazy<IntGaugeVec> = Lazy::new(|| {
613 1284 : register_int_gauge_vec!(
614 1284 : "pageserver_last_record_lsn",
615 1284 : "Last record LSN grouped by timeline",
616 1284 : &["tenant_id", "shard_id", "timeline_id"]
617 1284 : )
618 1284 : .expect("failed to define a metric")
619 1284 : });
620 :
621 1284 : static DISK_CONSISTENT_LSN: Lazy<IntGaugeVec> = Lazy::new(|| {
622 1284 : register_int_gauge_vec!(
623 1284 : "pageserver_disk_consistent_lsn",
624 1284 : "Disk consistent LSN grouped by timeline",
625 1284 : &["tenant_id", "shard_id", "timeline_id"]
626 1284 : )
627 1284 : .expect("failed to define a metric")
628 1284 : });
629 :
630 1284 : pub(crate) static PROJECTED_REMOTE_CONSISTENT_LSN: Lazy<UIntGaugeVec> = Lazy::new(|| {
631 1284 : register_uint_gauge_vec!(
632 1284 : "pageserver_projected_remote_consistent_lsn",
633 1284 : "Projected remote consistent LSN grouped by timeline",
634 1284 : &["tenant_id", "shard_id", "timeline_id"]
635 1284 : )
636 1284 : .expect("failed to define a metric")
637 1284 : });
638 :
639 1284 : static PITR_HISTORY_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
640 1284 : register_uint_gauge_vec!(
641 1284 : "pageserver_pitr_history_size",
642 1284 : "Data written since PITR cutoff on this timeline",
643 1284 : &["tenant_id", "shard_id", "timeline_id"]
644 1284 : )
645 1284 : .expect("failed to define a metric")
646 1284 : });
647 :
648 : #[derive(
649 720 : strum_macros::EnumIter,
650 0 : strum_macros::EnumString,
651 : strum_macros::Display,
652 : strum_macros::IntoStaticStr,
653 : )]
654 : #[strum(serialize_all = "kebab_case")]
655 : pub(crate) enum LayerKind {
656 : Delta,
657 : Image,
658 : }
659 :
660 : #[derive(
661 300 : strum_macros::EnumIter,
662 0 : strum_macros::EnumString,
663 : strum_macros::Display,
664 : strum_macros::IntoStaticStr,
665 : )]
666 : #[strum(serialize_all = "kebab_case")]
667 : pub(crate) enum LayerLevel {
668 : // We don't track the currently open ephemeral layer, since there's always exactly 1 and its
669 : // size changes. See `TIMELINE_EPHEMERAL_BYTES`.
670 : Frozen,
671 : L0,
672 : L1,
673 : }
674 :
675 1260 : static TIMELINE_LAYER_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
676 1260 : register_uint_gauge_vec!(
677 1260 : "pageserver_layer_bytes",
678 1260 : "Sum of frozen, L0, and L1 layer physical sizes in bytes (excluding the open ephemeral layer)",
679 1260 : &["tenant_id", "shard_id", "timeline_id", "level", "kind"]
680 1260 : )
681 1260 : .expect("failed to define a metric")
682 1260 : });
683 :
684 1260 : static TIMELINE_LAYER_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
685 1260 : register_uint_gauge_vec!(
686 1260 : "pageserver_layer_count",
687 1260 : "Number of frozen, L0, and L1 layers (excluding the open ephemeral layer)",
688 1260 : &["tenant_id", "shard_id", "timeline_id", "level", "kind"]
689 1260 : )
690 1260 : .expect("failed to define a metric")
691 1260 : });
692 :
693 1284 : static TIMELINE_ARCHIVE_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
694 1284 : register_uint_gauge_vec!(
695 1284 : "pageserver_archive_size",
696 1284 : "Timeline's logical size if it is considered eligible for archival (outside PITR window), else zero",
697 1284 : &["tenant_id", "shard_id", "timeline_id"]
698 1284 : )
699 1284 : .expect("failed to define a metric")
700 1284 : });
701 :
702 1284 : static STANDBY_HORIZON: Lazy<IntGaugeVec> = Lazy::new(|| {
703 1284 : register_int_gauge_vec!(
704 1284 : "pageserver_standby_horizon",
705 1284 : "Standby apply LSN for which GC is hold off, by timeline.",
706 1284 : &["tenant_id", "shard_id", "timeline_id"]
707 1284 : )
708 1284 : .expect("failed to define a metric")
709 1284 : });
710 :
711 1284 : static RESIDENT_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
712 1284 : register_uint_gauge_vec!(
713 1284 : "pageserver_resident_physical_size",
714 1284 : "The size of the layer files present in the pageserver's filesystem, for attached locations.",
715 1284 : &["tenant_id", "shard_id", "timeline_id"]
716 1284 : )
717 1284 : .expect("failed to define a metric")
718 1284 : });
719 :
720 1284 : static VISIBLE_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
721 1284 : register_uint_gauge_vec!(
722 1284 : "pageserver_visible_physical_size",
723 1284 : "The size of the layer files present in the pageserver's filesystem.",
724 1284 : &["tenant_id", "shard_id", "timeline_id"]
725 1284 : )
726 1284 : .expect("failed to define a metric")
727 1284 : });
728 :
729 1260 : pub(crate) static RESIDENT_PHYSICAL_SIZE_GLOBAL: Lazy<UIntGauge> = Lazy::new(|| {
730 1260 : register_uint_gauge!(
731 1260 : "pageserver_resident_physical_size_global",
732 1260 : "Like `pageserver_resident_physical_size`, but without tenant/timeline dimensions."
733 1260 : )
734 1260 : .expect("failed to define a metric")
735 1260 : });
736 :
737 1284 : static REMOTE_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
738 1284 : register_uint_gauge_vec!(
739 1284 : "pageserver_remote_physical_size",
740 1284 : "The size of the layer files present in the remote storage that are listed in the remote index_part.json.",
741 1284 : // Corollary: If any files are missing from the index part, they won't be included here.
742 1284 : &["tenant_id", "shard_id", "timeline_id"]
743 1284 : )
744 1284 : .expect("failed to define a metric")
745 1284 : });
746 :
747 1284 : static REMOTE_PHYSICAL_SIZE_GLOBAL: Lazy<UIntGauge> = Lazy::new(|| {
748 1284 : register_uint_gauge!(
749 1284 : "pageserver_remote_physical_size_global",
750 1284 : "Like `pageserver_remote_physical_size`, but without tenant/timeline dimensions."
751 1284 : )
752 1284 : .expect("failed to define a metric")
753 1284 : });
754 :
755 36 : pub(crate) static REMOTE_ONDEMAND_DOWNLOADED_LAYERS: Lazy<IntCounter> = Lazy::new(|| {
756 36 : register_int_counter!(
757 36 : "pageserver_remote_ondemand_downloaded_layers_total",
758 36 : "Total on-demand downloaded layers"
759 36 : )
760 36 : .unwrap()
761 36 : });
762 :
763 36 : pub(crate) static REMOTE_ONDEMAND_DOWNLOADED_BYTES: Lazy<IntCounter> = Lazy::new(|| {
764 36 : register_int_counter!(
765 36 : "pageserver_remote_ondemand_downloaded_bytes_total",
766 36 : "Total bytes of layers on-demand downloaded",
767 36 : )
768 36 : .unwrap()
769 36 : });
770 :
771 1284 : static CURRENT_LOGICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
772 1284 : register_uint_gauge_vec!(
773 1284 : "pageserver_current_logical_size",
774 1284 : "Current logical size grouped by timeline",
775 1284 : &["tenant_id", "shard_id", "timeline_id"]
776 1284 : )
777 1284 : .expect("failed to define current logical size metric")
778 1284 : });
779 :
780 1284 : static AUX_FILE_SIZE: Lazy<IntGaugeVec> = Lazy::new(|| {
781 1284 : register_int_gauge_vec!(
782 1284 : "pageserver_aux_file_estimated_size",
783 1284 : "The size of all aux files for a timeline in aux file v2 store.",
784 1284 : &["tenant_id", "shard_id", "timeline_id"]
785 1284 : )
786 1284 : .expect("failed to define a metric")
787 1284 : });
788 :
789 1284 : static VALID_LSN_LEASE_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
790 1284 : register_uint_gauge_vec!(
791 1284 : "pageserver_valid_lsn_lease_count",
792 1284 : "The number of valid leases after refreshing gc info.",
793 1284 : &["tenant_id", "shard_id", "timeline_id"],
794 1284 : )
795 1284 : .expect("failed to define a metric")
796 1284 : });
797 :
798 0 : pub(crate) static CIRCUIT_BREAKERS_BROKEN: Lazy<IntCounter> = Lazy::new(|| {
799 0 : register_int_counter!(
800 0 : "pageserver_circuit_breaker_broken",
801 0 : "How many times a circuit breaker has broken"
802 0 : )
803 0 : .expect("failed to define a metric")
804 0 : });
805 :
806 0 : pub(crate) static CIRCUIT_BREAKERS_UNBROKEN: Lazy<IntCounter> = Lazy::new(|| {
807 0 : register_int_counter!(
808 0 : "pageserver_circuit_breaker_unbroken",
809 0 : "How many times a circuit breaker has been un-broken (recovered)"
810 0 : )
811 0 : .expect("failed to define a metric")
812 0 : });
813 :
814 1236 : pub(crate) static COMPRESSION_IMAGE_INPUT_BYTES: Lazy<IntCounter> = Lazy::new(|| {
815 1236 : register_int_counter!(
816 1236 : "pageserver_compression_image_in_bytes_total",
817 1236 : "Size of data written into image layers before compression"
818 1236 : )
819 1236 : .expect("failed to define a metric")
820 1236 : });
821 :
822 1236 : pub(crate) static COMPRESSION_IMAGE_INPUT_BYTES_CONSIDERED: Lazy<IntCounter> = Lazy::new(|| {
823 1236 : register_int_counter!(
824 1236 : "pageserver_compression_image_in_bytes_considered",
825 1236 : "Size of potentially compressible data written into image layers before compression"
826 1236 : )
827 1236 : .expect("failed to define a metric")
828 1236 : });
829 :
830 1236 : pub(crate) static COMPRESSION_IMAGE_INPUT_BYTES_CHOSEN: Lazy<IntCounter> = Lazy::new(|| {
831 1236 : register_int_counter!(
832 1236 : "pageserver_compression_image_in_bytes_chosen",
833 1236 : "Size of data whose compressed form was written into image layers"
834 1236 : )
835 1236 : .expect("failed to define a metric")
836 1236 : });
837 :
838 1236 : pub(crate) static COMPRESSION_IMAGE_OUTPUT_BYTES: Lazy<IntCounter> = Lazy::new(|| {
839 1236 : register_int_counter!(
840 1236 : "pageserver_compression_image_out_bytes_total",
841 1236 : "Size of compressed image layer written"
842 1236 : )
843 1236 : .expect("failed to define a metric")
844 1236 : });
845 :
846 60 : pub(crate) static RELSIZE_CACHE_ENTRIES: Lazy<UIntGauge> = Lazy::new(|| {
847 60 : register_uint_gauge!(
848 60 : "pageserver_relsize_cache_entries",
849 60 : "Number of entries in the relation size cache",
850 60 : )
851 60 : .expect("failed to define a metric")
852 60 : });
853 :
854 60 : pub(crate) static RELSIZE_CACHE_HITS: Lazy<IntCounter> = Lazy::new(|| {
855 60 : register_int_counter!("pageserver_relsize_cache_hits", "Relation size cache hits",)
856 60 : .expect("failed to define a metric")
857 60 : });
858 :
859 60 : pub(crate) static RELSIZE_CACHE_MISSES: Lazy<IntCounter> = Lazy::new(|| {
860 60 : register_int_counter!(
861 60 : "pageserver_relsize_cache_misses",
862 60 : "Relation size cache misses",
863 60 : )
864 60 : .expect("failed to define a metric")
865 60 : });
866 :
867 24 : pub(crate) static RELSIZE_CACHE_MISSES_OLD: Lazy<IntCounter> = Lazy::new(|| {
868 24 : register_int_counter!(
869 24 : "pageserver_relsize_cache_misses_old",
870 24 : "Relation size cache misses where the lookup LSN is older than the last relation update"
871 24 : )
872 24 : .expect("failed to define a metric")
873 24 : });
874 :
875 : pub(crate) mod initial_logical_size {
876 : use metrics::{IntCounter, IntCounterVec, register_int_counter, register_int_counter_vec};
877 : use once_cell::sync::Lazy;
878 :
879 : pub(crate) struct StartCalculation(IntCounterVec);
880 1284 : pub(crate) static START_CALCULATION: Lazy<StartCalculation> = Lazy::new(|| {
881 1284 : StartCalculation(
882 1284 : register_int_counter_vec!(
883 1284 : "pageserver_initial_logical_size_start_calculation",
884 1284 : "Incremented each time we start an initial logical size calculation attempt. \
885 1284 : The `circumstances` label provides some additional details.",
886 1284 : &["attempt", "circumstances"]
887 1284 : )
888 1284 : .unwrap(),
889 1284 : )
890 1284 : });
891 :
892 : struct DropCalculation {
893 : first: IntCounter,
894 : retry: IntCounter,
895 : }
896 :
897 1284 : static DROP_CALCULATION: Lazy<DropCalculation> = Lazy::new(|| {
898 1284 : let vec = register_int_counter_vec!(
899 1284 : "pageserver_initial_logical_size_drop_calculation",
900 1284 : "Incremented each time we abort a started size calculation attmpt.",
901 1284 : &["attempt"]
902 1284 : )
903 1284 : .unwrap();
904 1284 : DropCalculation {
905 1284 : first: vec.with_label_values(&["first"]),
906 1284 : retry: vec.with_label_values(&["retry"]),
907 1284 : }
908 1284 : });
909 :
910 : pub(crate) struct Calculated {
911 : pub(crate) births: IntCounter,
912 : pub(crate) deaths: IntCounter,
913 : }
914 :
915 1284 : pub(crate) static CALCULATED: Lazy<Calculated> = Lazy::new(|| Calculated {
916 1284 : births: register_int_counter!(
917 1284 : "pageserver_initial_logical_size_finish_calculation",
918 1284 : "Incremented every time we finish calculation of initial logical size.\
919 1284 : If everything is working well, this should happen at most once per Timeline object."
920 1284 : )
921 1284 : .unwrap(),
922 1284 : deaths: register_int_counter!(
923 1284 : "pageserver_initial_logical_size_drop_finished_calculation",
924 1284 : "Incremented when we drop a finished initial logical size calculation result.\
925 1284 : Mainly useful to turn pageserver_initial_logical_size_finish_calculation into a gauge."
926 1284 : )
927 1284 : .unwrap(),
928 1284 : });
929 :
930 : pub(crate) struct OngoingCalculationGuard {
931 : inc_drop_calculation: Option<IntCounter>,
932 : }
933 :
934 : #[derive(strum_macros::IntoStaticStr)]
935 : pub(crate) enum StartCircumstances {
936 : EmptyInitial,
937 : SkippedConcurrencyLimiter,
938 : AfterBackgroundTasksRateLimit,
939 : }
940 :
941 : impl StartCalculation {
942 1356 : pub(crate) fn first(&self, circumstances: StartCircumstances) -> OngoingCalculationGuard {
943 1356 : let circumstances_label: &'static str = circumstances.into();
944 1356 : self.0
945 1356 : .with_label_values(&["first", circumstances_label])
946 1356 : .inc();
947 1356 : OngoingCalculationGuard {
948 1356 : inc_drop_calculation: Some(DROP_CALCULATION.first.clone()),
949 1356 : }
950 1356 : }
951 0 : pub(crate) fn retry(&self, circumstances: StartCircumstances) -> OngoingCalculationGuard {
952 0 : let circumstances_label: &'static str = circumstances.into();
953 0 : self.0
954 0 : .with_label_values(&["retry", circumstances_label])
955 0 : .inc();
956 0 : OngoingCalculationGuard {
957 0 : inc_drop_calculation: Some(DROP_CALCULATION.retry.clone()),
958 0 : }
959 0 : }
960 : }
961 :
962 : impl Drop for OngoingCalculationGuard {
963 1356 : fn drop(&mut self) {
964 1356 : if let Some(counter) = self.inc_drop_calculation.take() {
965 0 : counter.inc();
966 1356 : }
967 1356 : }
968 : }
969 :
970 : impl OngoingCalculationGuard {
971 1356 : pub(crate) fn calculation_result_saved(mut self) -> FinishedCalculationGuard {
972 1356 : drop(self.inc_drop_calculation.take());
973 1356 : CALCULATED.births.inc();
974 1356 : FinishedCalculationGuard {
975 1356 : inc_on_drop: CALCULATED.deaths.clone(),
976 1356 : }
977 1356 : }
978 : }
979 :
980 : pub(crate) struct FinishedCalculationGuard {
981 : inc_on_drop: IntCounter,
982 : }
983 :
984 : impl Drop for FinishedCalculationGuard {
985 36 : fn drop(&mut self) {
986 36 : self.inc_on_drop.inc();
987 36 : }
988 : }
989 :
990 : // context: https://github.com/neondatabase/neon/issues/5963
991 : pub(crate) static TIMELINES_WHERE_WALRECEIVER_GOT_APPROXIMATE_SIZE: Lazy<IntCounter> =
992 0 : Lazy::new(|| {
993 0 : register_int_counter!(
994 0 : "pageserver_initial_logical_size_timelines_where_walreceiver_got_approximate_size",
995 0 : "Counter for the following event: walreceiver calls\
996 0 : Timeline::get_current_logical_size() and it returns `Approximate` for the first time."
997 0 : )
998 0 : .unwrap()
999 0 : });
1000 : }
1001 :
1002 0 : static DIRECTORY_ENTRIES_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
1003 0 : register_uint_gauge_vec!(
1004 0 : "pageserver_directory_entries_count",
1005 0 : "Sum of the entries in pageserver-stored directory listings",
1006 0 : &["tenant_id", "shard_id", "timeline_id"]
1007 0 : )
1008 0 : .expect("failed to define a metric")
1009 0 : });
1010 :
1011 1296 : pub(crate) static TENANT_STATE_METRIC: Lazy<UIntGaugeVec> = Lazy::new(|| {
1012 1296 : register_uint_gauge_vec!(
1013 1296 : "pageserver_tenant_states_count",
1014 1296 : "Count of tenants per state",
1015 1296 : &["state"]
1016 1296 : )
1017 1296 : .expect("Failed to register pageserver_tenant_states_count metric")
1018 1296 : });
1019 :
1020 : /// A set of broken tenants.
1021 : ///
1022 : /// These are expected to be so rare that a set is fine. Set as in a new timeseries for each broken
1023 : /// tenant.
1024 60 : pub(crate) static BROKEN_TENANTS_SET: Lazy<UIntGaugeVec> = Lazy::new(|| {
1025 60 : register_uint_gauge_vec!(
1026 60 : "pageserver_broken_tenants_count",
1027 60 : "Set of broken tenants",
1028 60 : &["tenant_id", "shard_id"]
1029 60 : )
1030 60 : .expect("Failed to register pageserver_tenant_states_count metric")
1031 60 : });
1032 :
1033 36 : pub(crate) static TENANT_SYNTHETIC_SIZE_METRIC: Lazy<UIntGaugeVec> = Lazy::new(|| {
1034 36 : register_uint_gauge_vec!(
1035 36 : "pageserver_tenant_synthetic_cached_size_bytes",
1036 36 : "Synthetic size of each tenant in bytes",
1037 36 : &["tenant_id"]
1038 36 : )
1039 36 : .expect("Failed to register pageserver_tenant_synthetic_cached_size_bytes metric")
1040 36 : });
1041 :
1042 0 : pub(crate) static EVICTION_ITERATION_DURATION: Lazy<HistogramVec> = Lazy::new(|| {
1043 0 : register_histogram_vec!(
1044 0 : "pageserver_eviction_iteration_duration_seconds_global",
1045 0 : "Time spent on a single eviction iteration",
1046 0 : &["period_secs", "threshold_secs"],
1047 0 : STORAGE_OP_BUCKETS.into(),
1048 0 : )
1049 0 : .expect("failed to define a metric")
1050 0 : });
1051 :
1052 1284 : static EVICTIONS: Lazy<IntCounterVec> = Lazy::new(|| {
1053 1284 : register_int_counter_vec!(
1054 1284 : "pageserver_evictions",
1055 1284 : "Number of layers evicted from the pageserver",
1056 1284 : &["tenant_id", "shard_id", "timeline_id"]
1057 1284 : )
1058 1284 : .expect("failed to define a metric")
1059 1284 : });
1060 :
1061 1284 : static EVICTIONS_WITH_LOW_RESIDENCE_DURATION: Lazy<IntCounterVec> = Lazy::new(|| {
1062 1284 : register_int_counter_vec!(
1063 1284 : "pageserver_evictions_with_low_residence_duration",
1064 1284 : "If a layer is evicted that was resident for less than `low_threshold`, it is counted to this counter. \
1065 1284 : Residence duration is determined using the `residence_duration_data_source`.",
1066 1284 : &["tenant_id", "shard_id", "timeline_id", "residence_duration_data_source", "low_threshold_secs"]
1067 1284 : )
1068 1284 : .expect("failed to define a metric")
1069 1284 : });
1070 :
1071 0 : pub(crate) static UNEXPECTED_ONDEMAND_DOWNLOADS: Lazy<IntCounter> = Lazy::new(|| {
1072 0 : register_int_counter!(
1073 0 : "pageserver_unexpected_ondemand_downloads_count",
1074 0 : "Number of unexpected on-demand downloads. \
1075 0 : We log more context for each increment, so, forgo any labels in this metric.",
1076 0 : )
1077 0 : .expect("failed to define a metric")
1078 0 : });
1079 :
1080 : /// How long did we take to start up? Broken down by labels to describe
1081 : /// different phases of startup.
1082 0 : pub static STARTUP_DURATION: Lazy<GaugeVec> = Lazy::new(|| {
1083 0 : register_gauge_vec!(
1084 0 : "pageserver_startup_duration_seconds",
1085 0 : "Time taken by phases of pageserver startup, in seconds",
1086 0 : &["phase"]
1087 0 : )
1088 0 : .expect("Failed to register pageserver_startup_duration_seconds metric")
1089 0 : });
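// Hypothetical usage sketch (the phase label is illustrative, not necessarily
// one the pageserver actually emits):
//
// ```
// STARTUP_DURATION
//     .with_label_values(&["initial_tenant_load"])
//     .set(started_at.elapsed().as_secs_f64());
// ```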
1090 :
1091 0 : pub static STARTUP_IS_LOADING: Lazy<UIntGauge> = Lazy::new(|| {
1092 0 : register_uint_gauge!(
1093 0 : "pageserver_startup_is_loading",
1094 0 : "1 while in initial startup load of tenants, 0 at other times"
1095 0 : )
1096 0 : .expect("Failed to register pageserver_startup_is_loading")
1097 0 : });
1098 :
1099 1260 : pub(crate) static TIMELINE_EPHEMERAL_BYTES: Lazy<UIntGauge> = Lazy::new(|| {
1100 1260 : register_uint_gauge!(
1101 1260 : "pageserver_timeline_ephemeral_bytes",
1102 1260 : "Total number of bytes in ephemeral layers, summed for all timelines. Approximate, lazily updated."
1103 1260 : )
1104 1260 : .expect("Failed to register metric")
1105 1260 : });
1106 :
1107 : /// Metrics related to the lifecycle of a [`crate::tenant::TenantShard`] object: things
1108 : /// like how long it took to load.
1109 : ///
1110 : /// Note that these are process-global metrics, _not_ per-tenant metrics. Per-tenant
1111 : /// metrics are rather expensive, and usually fine-grained metrics make more sense
1112 : /// at a timeline level than at a tenant level.
1113 : pub(crate) struct TenantMetrics {
1114 : /// How long did tenants take to go from construction to active state?
1115 : pub(crate) activation: Histogram,
1116 : pub(crate) preload: Histogram,
1117 : pub(crate) attach: Histogram,
1118 :
1119 : /// How many tenants are included in the initial startup of the pageserver?
1120 : pub(crate) startup_scheduled: IntCounter,
1121 : pub(crate) startup_complete: IntCounter,
1122 : }
1123 :
1124 0 : pub(crate) static TENANT: Lazy<TenantMetrics> = Lazy::new(|| {
1125 0 : TenantMetrics {
1126 0 : activation: register_histogram!(
1127 0 : "pageserver_tenant_activation_seconds",
1128 0 : "Time taken by tenants to activate, in seconds",
1129 0 : CRITICAL_OP_BUCKETS.into()
1130 0 : )
1131 0 : .expect("Failed to register metric"),
1132 0 : preload: register_histogram!(
1133 0 : "pageserver_tenant_preload_seconds",
1134 0 : "Time taken by tenants to load remote metadata on startup/attach, in seconds",
1135 0 : CRITICAL_OP_BUCKETS.into()
1136 0 : )
1137 0 : .expect("Failed to register metric"),
1138 0 : attach: register_histogram!(
1139 0 : "pageserver_tenant_attach_seconds",
1140 0 : "Time taken by tenants to intialize, after remote metadata is already loaded",
1141 0 : CRITICAL_OP_BUCKETS.into()
1142 0 : )
1143 0 : .expect("Failed to register metric"),
1144 0 : startup_scheduled: register_int_counter!(
1145 0 : "pageserver_tenant_startup_scheduled",
1146 0 : "Number of tenants included in pageserver startup (doesn't count tenants attached later)"
1147 0 : ).expect("Failed to register metric"),
1148 0 : startup_complete: register_int_counter!(
1149 0 : "pageserver_tenant_startup_complete",
1150 0 : "Number of tenants that have completed warm-up, or activated on-demand during initial startup: \
1151 0 : should eventually reach `pageserver_tenant_startup_scheduled_total`. Does not include broken \
1152 0 : tenants: such cases will lead to this metric never reaching the scheduled count."
1153 0 : ).expect("Failed to register metric"),
1154 0 : }
1155 0 : });
1156 :
1157 : /// Each `Timeline`'s [`EVICTIONS_WITH_LOW_RESIDENCE_DURATION`] metric.
1158 : #[derive(Debug)]
1159 : pub(crate) struct EvictionsWithLowResidenceDuration {
1160 : data_source: &'static str,
1161 : threshold: Duration,
1162 : counter: Option<IntCounter>,
1163 : }
1164 :
1165 : pub(crate) struct EvictionsWithLowResidenceDurationBuilder {
1166 : data_source: &'static str,
1167 : threshold: Duration,
1168 : }
1169 :
1170 : impl EvictionsWithLowResidenceDurationBuilder {
1171 2796 : pub fn new(data_source: &'static str, threshold: Duration) -> Self {
1172 2796 : Self {
1173 2796 : data_source,
1174 2796 : threshold,
1175 2796 : }
1176 2796 : }
1177 :
1178 2796 : fn build(
1179 2796 : &self,
1180 2796 : tenant_id: &str,
1181 2796 : shard_id: &str,
1182 2796 : timeline_id: &str,
1183 2796 : ) -> EvictionsWithLowResidenceDuration {
1184 2796 : let counter = EVICTIONS_WITH_LOW_RESIDENCE_DURATION
1185 2796 : .get_metric_with_label_values(&[
1186 2796 : tenant_id,
1187 2796 : shard_id,
1188 2796 : timeline_id,
1189 2796 : self.data_source,
1190 2796 : &EvictionsWithLowResidenceDuration::threshold_label_value(self.threshold),
1191 2796 : ])
1192 2796 : .unwrap();
1193 2796 : EvictionsWithLowResidenceDuration {
1194 2796 : data_source: self.data_source,
1195 2796 : threshold: self.threshold,
1196 2796 : counter: Some(counter),
1197 2796 : }
1198 2796 : }
1199 : }
1200 :
1201 : impl EvictionsWithLowResidenceDuration {
1202 2856 : fn threshold_label_value(threshold: Duration) -> String {
1203 2856 : format!("{}", threshold.as_secs())
1204 2856 : }
1205 :
1206 24 : pub fn observe(&self, observed_value: Duration) {
1207 24 : if observed_value < self.threshold {
1208 24 : self.counter
1209 24 : .as_ref()
1210 24 : .expect("nobody calls this function after `remove_from_vec`")
1211 24 : .inc();
1212 24 : }
1213 24 : }
1214 :
1215 0 : pub fn change_threshold(
1216 0 : &mut self,
1217 0 : tenant_id: &str,
1218 0 : shard_id: &str,
1219 0 : timeline_id: &str,
1220 0 : new_threshold: Duration,
1221 0 : ) {
1222 0 : if new_threshold == self.threshold {
1223 0 : return;
1224 0 : }
1225 0 : let mut with_new = EvictionsWithLowResidenceDurationBuilder::new(
1226 0 : self.data_source,
1227 0 : new_threshold,
1228 0 : )
1229 0 : .build(tenant_id, shard_id, timeline_id);
1230 0 : std::mem::swap(self, &mut with_new);
1231 0 : with_new.remove(tenant_id, shard_id, timeline_id);
1232 0 : }
1233 :
1234 : // This could be a `Drop` impl, but we need the `tenant_id` and `timeline_id`.
1235 60 : fn remove(&mut self, tenant_id: &str, shard_id: &str, timeline_id: &str) {
1236 60 : let Some(_counter) = self.counter.take() else {
1237 0 : return;
1238 : };
1239 :
1240 60 : let threshold = Self::threshold_label_value(self.threshold);
1241 60 :
1242 60 : let removed = EVICTIONS_WITH_LOW_RESIDENCE_DURATION.remove_label_values(&[
1243 60 : tenant_id,
1244 60 : shard_id,
1245 60 : timeline_id,
1246 60 : self.data_source,
1247 60 : &threshold,
1248 60 : ]);
1249 60 :
1250 60 : match removed {
1251 0 : Err(e) => {
1252 0 : // this has been hit in staging as
1253 0 : // <https://neondatabase.sentry.io/issues/4142396994/>, but we don't know how.
1254 0 : // Because we can be in the drop path already, don't risk:
1255 0 : // - "double-panic => illegal instruction" or
1256 0 : // - future "drop panick => abort"
1257 0 : //
1258 0 : // so just nag: (the error has the labels)
1259 0 : tracing::warn!(
1260 0 : "failed to remove EvictionsWithLowResidenceDuration, it was already removed? {e:#?}"
1261 : );
1262 : }
1263 : Ok(()) => {
1264 : // to help identify cases where we double-remove the same values, let's log all
1265 : // deletions?
1266 60 : tracing::info!(
1267 0 : "removed EvictionsWithLowResidenceDuration with {tenant_id}, {timeline_id}, {}, {threshold}",
1268 : self.data_source
1269 : );
1270 : }
1271 : }
1272 60 : }
1273 : }
1274 :
1275 : // Metrics collected on disk IO operations
1276 : //
1277 : // Roughly logarithmic scale.
1278 : const STORAGE_IO_TIME_BUCKETS: &[f64] = &[
1279 : 0.000030, // 30 usec
1280 : 0.001000, // 1000 usec
1281 : 0.030, // 30 ms
1282 : 1.000, // 1000 ms
1283 : 30.000, // 30000 ms
1284 : ];
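// "Roughly logarithmic" here means a factor of roughly 30 between adjacent
// buckets (30 us -> 1 ms -> 30 ms -> 1 s -> 30 s), trading resolution for a
// small, fixed number of buckets per operation label.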
1285 :
1286 : /// VirtualFile fs operation variants.
1287 : ///
1288 : /// Operations:
1289 : /// - open ([`std::fs::OpenOptions::open`])
1290 : /// - close (dropping [`crate::virtual_file::VirtualFile`])
1291 : /// - close-by-replace (close by replacement algorithm)
1292 : /// - read (`read_at`)
1293 : /// - write (`write_at`)
1294 : /// - seek (modify internal position or file length query)
1295 : /// - fsync ([`std::fs::File::sync_all`])
1296 : /// - metadata ([`std::fs::File::metadata`])
1297 : #[derive(
1298 0 : Debug, Clone, Copy, strum_macros::EnumCount, strum_macros::EnumIter, strum_macros::FromRepr,
1299 : )]
1300 : pub(crate) enum StorageIoOperation {
1301 : Open,
1302 : OpenAfterReplace,
1303 : Close,
1304 : CloseByReplace,
1305 : Read,
1306 : Write,
1307 : Seek,
1308 : Fsync,
1309 : Metadata,
1310 : SetLen,
1311 : }
1312 :
1313 : impl StorageIoOperation {
1314 14640 : pub fn as_str(&self) -> &'static str {
1315 14640 : match self {
1316 1464 : StorageIoOperation::Open => "open",
1317 1464 : StorageIoOperation::OpenAfterReplace => "open-after-replace",
1318 1464 : StorageIoOperation::Close => "close",
1319 1464 : StorageIoOperation::CloseByReplace => "close-by-replace",
1320 1464 : StorageIoOperation::Read => "read",
1321 1464 : StorageIoOperation::Write => "write",
1322 1464 : StorageIoOperation::Seek => "seek",
1323 1464 : StorageIoOperation::Fsync => "fsync",
1324 1464 : StorageIoOperation::Metadata => "metadata",
1325 1464 : StorageIoOperation::SetLen => "set_len",
1326 : }
1327 14640 : }
1328 : }
1329 :
1330 : /// Tracks time taken by fs operations near VirtualFile.
1331 : #[derive(Debug)]
1332 : pub(crate) struct StorageIoTime {
1333 : metrics: [Histogram; StorageIoOperation::COUNT],
1334 : }
1335 :
1336 : impl StorageIoTime {
1337 1464 : fn new() -> Self {
1338 1464 : let storage_io_histogram_vec = register_histogram_vec!(
1339 1464 : "pageserver_io_operations_seconds",
1340 1464 : "Time spent in IO operations",
1341 1464 : &["operation"],
1342 1464 : STORAGE_IO_TIME_BUCKETS.into()
1343 1464 : )
1344 1464 : .expect("failed to define a metric");
1345 14640 : let metrics = std::array::from_fn(|i| {
1346 14640 : let op = StorageIoOperation::from_repr(i).unwrap();
1347 14640 : storage_io_histogram_vec
1348 14640 : .get_metric_with_label_values(&[op.as_str()])
1349 14640 : .unwrap()
1350 14640 : });
1351 1464 : Self { metrics }
1352 1464 : }
1353 :
1354 5890040 : pub(crate) fn get(&self, op: StorageIoOperation) -> &Histogram {
1355 5890040 : &self.metrics[op as usize]
1356 5890040 : }
1357 : }
1358 :
1359 : pub(crate) static STORAGE_IO_TIME_METRIC: Lazy<StorageIoTime> = Lazy::new(StorageIoTime::new);
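// Usage sketch (hypothetical call site): time an fs operation and record it
// under the matching operation label.
//
// ```
// let started = std::time::Instant::now();
// // ... perform the write_at() call ...
// STORAGE_IO_TIME_METRIC
//     .get(StorageIoOperation::Write)
//     .observe(started.elapsed().as_secs_f64());
// ```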
1360 :
1361 : #[derive(Clone, Copy)]
1362 : #[repr(usize)]
1363 : pub(crate) enum StorageIoSizeOperation {
1364 : Read,
1365 : Write,
1366 : }
1367 :
1368 : impl StorageIoSizeOperation {
1369 : pub(crate) const VARIANTS: &'static [&'static str] = &["read", "write"];
1370 :
1371 9048 : fn as_str(&self) -> &'static str {
1372 9048 : Self::VARIANTS[*self as usize]
1373 9048 : }
1374 : }
1375 :
1376 : // Needed for the https://neonprod.grafana.net/d/5uK9tHL4k/picking-tenant-for-relocation?orgId=1
1377 1728 : pub(crate) static STORAGE_IO_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
1378 1728 : register_uint_gauge_vec!(
1379 1728 : "pageserver_io_operations_bytes_total",
1380 1728 : "Total amount of bytes read/written in IO operations",
1381 1728 : &["operation", "tenant_id", "shard_id", "timeline_id"]
1382 1728 : )
1383 1728 : .expect("failed to define a metric")
1384 1728 : });
1385 :
1386 : #[derive(Clone, Debug)]
1387 : pub(crate) struct StorageIoSizeMetrics {
1388 : pub read: UIntGauge,
1389 : pub write: UIntGauge,
1390 : }
1391 :
1392 : impl StorageIoSizeMetrics {
1393 4524 : pub(crate) fn new(tenant_id: &str, shard_id: &str, timeline_id: &str) -> Self {
1394 4524 : let read = STORAGE_IO_SIZE
1395 4524 : .get_metric_with_label_values(&[
1396 4524 : StorageIoSizeOperation::Read.as_str(),
1397 4524 : tenant_id,
1398 4524 : shard_id,
1399 4524 : timeline_id,
1400 4524 : ])
1401 4524 : .unwrap();
1402 4524 : let write = STORAGE_IO_SIZE
1403 4524 : .get_metric_with_label_values(&[
1404 4524 : StorageIoSizeOperation::Write.as_str(),
1405 4524 : tenant_id,
1406 4524 : shard_id,
1407 4524 : timeline_id,
1408 4524 : ])
1409 4524 : .unwrap();
1410 4524 : Self { read, write }
1411 4524 : }
1412 : }
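// Usage sketch (hypothetical; `size_metrics` is a `StorageIoSizeMetrics`
// value and `n` the number of bytes just read): the gauges are only ever
// added to, matching the `_bytes_total` metric name.
//
// ```
// size_metrics.read.add(n as u64);
// ```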
1413 :
1414 : #[cfg(not(test))]
1415 : pub(crate) mod virtual_file_descriptor_cache {
1416 : use super::*;
1417 :
1418 0 : pub(crate) static SIZE_MAX: Lazy<UIntGauge> = Lazy::new(|| {
1419 0 : register_uint_gauge!(
1420 0 : "pageserver_virtual_file_descriptor_cache_size_max",
1421 0 : "Maximum number of open file descriptors in the cache."
1422 0 : )
1423 0 : .unwrap()
1424 0 : });
1425 :
1426 : // SIZE_CURRENT: derive it like so:
1427 : // ```
1428 : // sum(pageserver_io_operations_seconds_count{operation=~"^(open|open-after-replace)$"})
1429 : // -ignoring(operation)
1430 : // sum(pageserver_io_operations_seconds_count{operation=~"^(close|close-by-replace)$"})
1431 : // ```
1432 : }
1433 :
1434 : #[cfg(not(test))]
1435 : pub(crate) mod virtual_file_io_engine {
1436 : use super::*;
1437 :
1438 0 : pub(crate) static KIND: Lazy<UIntGaugeVec> = Lazy::new(|| {
1439 0 : register_uint_gauge_vec!(
1440 0 : "pageserver_virtual_file_io_engine_kind",
1441 0 : "The configured io engine for VirtualFile",
1442 0 : &["kind"],
1443 0 : )
1444 0 : .unwrap()
1445 0 : });
1446 : }
1447 :
1448 : pub(crate) struct SmgrOpTimer(Option<SmgrOpTimerInner>);
1449 : pub(crate) struct SmgrOpTimerInner {
1450 : global_execution_latency_histo: Histogram,
1451 : per_timeline_execution_latency_histo: Option<Histogram>,
1452 :
1453 : global_batch_wait_time: Histogram,
1454 : per_timeline_batch_wait_time: Histogram,
1455 :
1456 : global_flush_in_progress_micros: IntCounter,
1457 : per_timeline_flush_in_progress_micros: IntCounter,
1458 :
1459 : throttling: Arc<tenant_throttling::Pagestream>,
1460 :
1461 : timings: SmgrOpTimerState,
1462 : }
1463 :
1464 : /// The stages of request processing are represented by the enum variants.
1465 : /// Used as part of [`SmgrOpTimerInner::timings`].
1466 : ///
1467 : /// Request processing calls into the `SmgrOpTimer::observe_*` methods at the
1468 : /// transition points.
1469 : /// These methods bump relevant counters and then update [`SmgrOpTimerInner::timings`]
1470 : /// to the next state.
1471 : ///
1472 : /// Each request goes through every stage, in all configurations.
1473 : ///
1474 : #[derive(Debug)]
1475 : enum SmgrOpTimerState {
1476 : Received {
1477 : // In the future, we may want to track the full time the request spent
1478 : // inside pageserver process (time spent in kernel buffers can't be tracked).
1479 : // `received_at` would be used for that.
1480 : #[allow(dead_code)]
1481 : received_at: Instant,
1482 : },
1483 : Throttling {
1484 : throttle_started_at: Instant,
1485 : },
1486 : Batching {
1487 : throttle_done_at: Instant,
1488 : },
1489 : Executing {
1490 : execution_started_at: Instant,
1491 : },
1492 : Flushing,
1493 : // NB: when adding observation points, remember to update the Drop impl.
1494 : }
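// Illustrative sketch of the transition sequence described above, assuming a
// `timer: SmgrOpTimer` obtained from `SmgrQueryTimePerTimeline::start_smgr_op`.
// It mirrors what the `Drop` impl further below replays for early-dropped timers:
//
//     timer.observe_throttle_start(Instant::now());            // Received   -> Throttling
//     timer.observe_throttle_done(ThrottleResult::NotThrottled {
//         end: Instant::now(),
//     });                                                      // Throttling -> Batching
//     timer.observe_execution_start(Instant::now());           // Batching   -> Executing
//     let flush = timer.observe_execution_end(Instant::now()); // Executing  -> Flushing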
1495 :
1496 : // NB: when adding observation points, remember to update the Drop impl.
1497 : impl SmgrOpTimer {
1498 : /// See [`SmgrOpTimerState`] for more context.
1499 0 : pub(crate) fn observe_throttle_start(&mut self, at: Instant) {
1500 0 : let Some(inner) = self.0.as_mut() else {
1501 0 : return;
1502 : };
1503 0 : let SmgrOpTimerState::Received { received_at: _ } = &mut inner.timings else {
1504 0 : return;
1505 : };
1506 0 : inner.throttling.count_accounted_start.inc();
1507 0 : inner.timings = SmgrOpTimerState::Throttling {
1508 0 : throttle_started_at: at,
1509 0 : };
1510 0 : }
1511 :
1512 : /// See [`SmgrOpTimerState`] for more context.
1513 0 : pub(crate) fn observe_throttle_done(&mut self, throttle: ThrottleResult) {
1514 0 : let Some(inner) = self.0.as_mut() else {
1515 0 : return;
1516 : };
1517 : let SmgrOpTimerState::Throttling {
1518 0 : throttle_started_at,
1519 0 : } = &inner.timings
1520 : else {
1521 0 : return;
1522 : };
1523 0 : inner.throttling.count_accounted_finish.inc();
1524 0 : match throttle {
1525 0 : ThrottleResult::NotThrottled { end } => {
1526 0 : inner.timings = SmgrOpTimerState::Batching {
1527 0 : throttle_done_at: end,
1528 0 : };
1529 0 : }
1530 0 : ThrottleResult::Throttled { end } => {
1531 0 : // update metrics
1532 0 : inner.throttling.count_throttled.inc();
1533 0 : inner
1534 0 : .throttling
1535 0 : .wait_time
1536 0 : .inc_by((end - *throttle_started_at).as_micros().try_into().unwrap());
1537 0 : // state transition
1538 0 : inner.timings = SmgrOpTimerState::Batching {
1539 0 : throttle_done_at: end,
1540 0 : };
1541 0 : }
1542 : }
1543 0 : }
1544 :
1545 : /// See [`SmgrOpTimerState`] for more context.
1546 0 : pub(crate) fn observe_execution_start(&mut self, at: Instant) {
1547 0 : let Some(inner) = self.0.as_mut() else {
1548 0 : return;
1549 : };
1550 0 : let SmgrOpTimerState::Batching { throttle_done_at } = &inner.timings else {
1551 0 : return;
1552 : };
1553 : // update metrics
1554 0 : let batch = at - *throttle_done_at;
1555 0 : inner.global_batch_wait_time.observe(batch.as_secs_f64());
1556 0 : inner
1557 0 : .per_timeline_batch_wait_time
1558 0 : .observe(batch.as_secs_f64());
1559 0 : // state transition
1560 0 : inner.timings = SmgrOpTimerState::Executing {
1561 0 : execution_started_at: at,
1562 0 : }
1563 0 : }
1564 :
1565 : /// For all but the first caller, this is a no-op.
1566 :     /// The first caller receives `Some`; subsequent callers receive `None`.
1567 : ///
1568 : /// See [`SmgrOpTimerState`] for more context.
1569 0 : pub(crate) fn observe_execution_end(&mut self, at: Instant) -> Option<SmgrOpFlushInProgress> {
1570 : // NB: unlike the other observe_* methods, this one take()s.
1571 : #[allow(clippy::question_mark)] // maintain similar code pattern.
1572 0 : let Some(mut inner) = self.0.take() else {
1573 0 : return None;
1574 : };
1575 : let SmgrOpTimerState::Executing {
1576 0 : execution_started_at,
1577 0 : } = &inner.timings
1578 : else {
1579 0 : return None;
1580 : };
1581 : // update metrics
1582 0 : let execution = at - *execution_started_at;
1583 0 : inner
1584 0 : .global_execution_latency_histo
1585 0 : .observe(execution.as_secs_f64());
1586 0 : if let Some(per_timeline_execution_latency_histo) =
1587 0 : &inner.per_timeline_execution_latency_histo
1588 0 : {
1589 0 : per_timeline_execution_latency_histo.observe(execution.as_secs_f64());
1590 0 : }
1591 :
1592 : // state transition
1593 0 : inner.timings = SmgrOpTimerState::Flushing;
1594 0 :
1595 0 : // return the flush in progress object which
1596 0 : // will do the remaining metrics updates
1597 0 : let SmgrOpTimerInner {
1598 0 : global_flush_in_progress_micros,
1599 0 : per_timeline_flush_in_progress_micros,
1600 0 : ..
1601 0 : } = inner;
1602 0 : Some(SmgrOpFlushInProgress {
1603 0 : global_micros: global_flush_in_progress_micros,
1604 0 : per_timeline_micros: per_timeline_flush_in_progress_micros,
1605 0 : })
1606 0 : }
1607 : }
1608 :
1609 : /// The last stage of request processing is serializing and flushing the request
1610 : /// into the TCP connection. We want to make slow flushes observable
1611 :     /// _while they are occurring_, so this struct provides a wrapper method [`Self::measure`]
1612 : /// to periodically bump the metric.
1613 : ///
1614 : /// If in the future we decide that we're not interested in live updates, we can
1615 : /// add another `observe_*` method to [`SmgrOpTimer`], follow the existing pattern there,
1616 : /// and remove this struct from the code base.
1617 : pub(crate) struct SmgrOpFlushInProgress {
1618 : global_micros: IntCounter,
1619 : per_timeline_micros: IntCounter,
1620 : }
1621 :
1622 : impl Drop for SmgrOpTimer {
1623 0 : fn drop(&mut self) {
1624 0 : // In case of early drop, update any of the remaining metrics with
1625 0 : // observations so that (started,finished) counter pairs balance out
1626 0 :         // and all counters on the latency path have the same number of
1627 0 : // observations.
1628 0 : // It's technically lying and it would be better if each metric had
1629 0 : // a separate label or similar for cancelled requests.
1630 0 : // But we don't have that right now and counter pairs balancing
1631 0 : // out is useful when using the metrics in panels and whatnot.
1632 0 : let now = Instant::now();
1633 0 : self.observe_throttle_start(now);
1634 0 : self.observe_throttle_done(ThrottleResult::NotThrottled { end: now });
1635 0 : self.observe_execution_start(now);
1636 0 : let maybe_flush_timer = self.observe_execution_end(now);
1637 0 : drop(maybe_flush_timer);
1638 0 : }
1639 : }
1640 :
1641 : impl SmgrOpFlushInProgress {
1642 :     /// The caller must guarantee that `socket_fd` outlives this function.
1643 0 : pub(crate) async fn measure<Fut, O>(
1644 0 : self,
1645 0 : started_at: Instant,
1646 0 : mut fut: Fut,
1647 0 : socket_fd: RawFd,
1648 0 : ) -> O
1649 0 : where
1650 0 : Fut: std::future::Future<Output = O>,
1651 0 : {
1652 0 : let mut fut = std::pin::pin!(fut);
1653 0 :
1654 0 : let mut logged = false;
1655 0 : let mut last_counter_increment_at = started_at;
1656 0 : let mut observe_guard = scopeguard::guard(
1657 0 : |is_timeout| {
1658 0 : let now = Instant::now();
1659 0 :
1660 0 : // Increment counter
1661 0 : {
1662 0 : let elapsed_since_last_observe = now - last_counter_increment_at;
1663 0 : self.global_micros
1664 0 : .inc_by(u64::try_from(elapsed_since_last_observe.as_micros()).unwrap());
1665 0 : self.per_timeline_micros
1666 0 : .inc_by(u64::try_from(elapsed_since_last_observe.as_micros()).unwrap());
1667 0 : last_counter_increment_at = now;
1668 0 : }
1669 0 :
1670 0 : // Log something on every timeout, and on completion but only if we hit a timeout.
1671 0 : if is_timeout || logged {
1672 0 : logged = true;
1673 0 : let elapsed_total = now - started_at;
1674 0 : let msg = if is_timeout {
1675 0 : "slow flush ongoing"
1676 : } else {
1677 0 : "slow flush completed or cancelled"
1678 : };
1679 :
1680 0 : let (inq, outq) = {
1681 0 : // SAFETY: caller guarantees that `socket_fd` outlives this function.
1682 0 : #[cfg(target_os = "linux")]
1683 0 : unsafe {
1684 0 : (
1685 0 : utils::linux_socket_ioctl::inq(socket_fd).unwrap_or(-2),
1686 0 : utils::linux_socket_ioctl::outq(socket_fd).unwrap_or(-2),
1687 0 : )
1688 0 : }
1689 0 : #[cfg(not(target_os = "linux"))]
1690 0 : {
1691 0 : _ = socket_fd; // appease unused lint on macOS
1692 0 : (-1, -1)
1693 0 : }
1694 0 : };
1695 0 :
1696 0 : let elapsed_total_secs = format!("{:.6}", elapsed_total.as_secs_f64());
1697 0 : tracing::info!(elapsed_total_secs, inq, outq, msg);
1698 0 : }
1699 0 : },
1700 0 : |mut observe| {
1701 0 : observe(false);
1702 0 : },
1703 0 : );
1704 :
1705 : loop {
1706 0 : match tokio::time::timeout(Duration::from_secs(10), &mut fut).await {
1707 0 : Ok(v) => return v,
1708 0 : Err(_timeout) => {
1709 0 : (*observe_guard)(true);
1710 0 : }
1711 : }
1712 : }
1713 0 : }
1714 : }
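// Hedged usage sketch for `measure` (the flush future and file descriptor
// names are hypothetical; the real call site lives in page_service): the
// wrapper drives the flush to completion, bumping the in-progress counters
// and logging whenever a 10-second timeout elapses.
//
//     let started_at = Instant::now();
//     let res = flush_in_progress
//         .measure(started_at, pgb.flush(), raw_socket_fd)
//         .await;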
1715 :
1716 : #[derive(
1717 : Debug,
1718 : Clone,
1719 : Copy,
1720 : IntoStaticStr,
1721 : strum_macros::EnumCount,
1722 0 : strum_macros::EnumIter,
1723 : strum_macros::FromRepr,
1724 : enum_map::Enum,
1725 : )]
1726 : #[strum(serialize_all = "snake_case")]
1727 : pub enum SmgrQueryType {
1728 : GetRelExists,
1729 : GetRelSize,
1730 : GetPageAtLsn,
1731 : GetDbSize,
1732 : GetSlruSegment,
1733 : #[cfg(feature = "testing")]
1734 : Test,
1735 : }
1736 :
1737 : #[derive(
1738 : Debug,
1739 : Clone,
1740 : Copy,
1741 : IntoStaticStr,
1742 : strum_macros::EnumCount,
1743 540 : strum_macros::EnumIter,
1744 : strum_macros::FromRepr,
1745 : enum_map::Enum,
1746 : )]
1747 : #[strum(serialize_all = "snake_case")]
1748 : pub enum GetPageBatchBreakReason {
1749 : BatchFull,
1750 : NonBatchableRequest,
1751 : NonUniformLsn,
1752 : SamePageAtDifferentLsn,
1753 : NonUniformTimeline,
1754 : ExecutorSteal,
1755 : #[cfg(feature = "testing")]
1756 : NonUniformKey,
1757 : }
1758 :
1759 : pub(crate) struct SmgrQueryTimePerTimeline {
1760 : global_started: [IntCounter; SmgrQueryType::COUNT],
1761 : global_latency: [Histogram; SmgrQueryType::COUNT],
1762 : per_timeline_getpage_started: IntCounter,
1763 : per_timeline_getpage_latency: Histogram,
1764 : global_batch_size: Histogram,
1765 : per_timeline_batch_size: Histogram,
1766 : global_flush_in_progress_micros: IntCounter,
1767 : per_timeline_flush_in_progress_micros: IntCounter,
1768 : global_batch_wait_time: Histogram,
1769 : per_timeline_batch_wait_time: Histogram,
1770 : global_batch_break_reason: [IntCounter; GetPageBatchBreakReason::COUNT],
1771 : per_timeline_batch_break_reason: GetPageBatchBreakReasonTimelineMetrics,
1772 : throttling: Arc<tenant_throttling::Pagestream>,
1773 : }
1774 :
1775 1284 : static SMGR_QUERY_STARTED_GLOBAL: Lazy<IntCounterVec> = Lazy::new(|| {
1776 1284 : register_int_counter_vec!(
1777 1284 :         // it's a counter, but the name is prepared to extend it to a histogram of queue depth
1778 1284 : "pageserver_smgr_query_started_global_count",
1779 1284 : "Number of smgr queries started, aggregated by query type.",
1780 1284 : &["smgr_query_type"],
1781 1284 : )
1782 1284 : .expect("failed to define a metric")
1783 1284 : });
1784 :
1785 1284 : static SMGR_QUERY_STARTED_PER_TENANT_TIMELINE: Lazy<IntCounterVec> = Lazy::new(|| {
1786 1284 : register_int_counter_vec!(
1787 1284 :         // it's a counter, but the name is prepared to extend it to a histogram of queue depth
1788 1284 : "pageserver_smgr_query_started_count",
1789 1284 : "Number of smgr queries started, aggregated by query type and tenant/timeline.",
1790 1284 : &["smgr_query_type", "tenant_id", "shard_id", "timeline_id"],
1791 1284 : )
1792 1284 : .expect("failed to define a metric")
1793 1284 : });
1794 :
1795 : /// Per-timeline smgr histogram buckets should be the same as the compute buckets, such that the
1796 : /// metrics are comparable across compute and Pageserver. See also:
1797 : /// <https://github.com/neondatabase/neon/blob/1a87975d956a8ad17ec8b85da32a137ec4893fcc/pgxn/neon/neon_perf_counters.h#L18-L27>
1798 : /// <https://github.com/neondatabase/flux-fleet/blob/556182a939edda87ff1d85a6b02e5cec901e0e9e/apps/base/compute-metrics/scrape-compute-sql-exporter.yaml#L29-L35>
1799 : static SMGR_QUERY_TIME_PER_TENANT_TIMELINE_BUCKETS: &[f64] =
1800 : &[0.0006, 0.001, 0.003, 0.006, 0.01, 0.03, 0.1, 1.0, 3.0];
1801 :
1802 1284 : static SMGR_QUERY_TIME_PER_TENANT_TIMELINE: Lazy<HistogramVec> = Lazy::new(|| {
1803 1284 : register_histogram_vec!(
1804 1284 : "pageserver_smgr_query_seconds",
1805 1284 : "Time spent _executing_ smgr query handling, excluding batch and throttle delays.",
1806 1284 : &["smgr_query_type", "tenant_id", "shard_id", "timeline_id"],
1807 1284 : SMGR_QUERY_TIME_PER_TENANT_TIMELINE_BUCKETS.into(),
1808 1284 : )
1809 1284 : .expect("failed to define a metric")
1810 1284 : });
1811 :
1812 1284 : static SMGR_QUERY_TIME_GLOBAL_BUCKETS: Lazy<Vec<f64>> = Lazy::new(|| {
1813 1284 : [
1814 1284 : 1,
1815 1284 : 10,
1816 1284 : 20,
1817 1284 : 40,
1818 1284 : 60,
1819 1284 : 80,
1820 1284 : 100,
1821 1284 : 200,
1822 1284 : 300,
1823 1284 : 400,
1824 1284 : 500,
1825 1284 : 600,
1826 1284 : 700,
1827 1284 : 800,
1828 1284 : 900,
1829 1284 : 1_000, // 1ms
1830 1284 : 2_000,
1831 1284 : 4_000,
1832 1284 : 6_000,
1833 1284 : 8_000,
1834 1284 : 10_000, // 10ms
1835 1284 : 20_000,
1836 1284 : 40_000,
1837 1284 : 60_000,
1838 1284 : 80_000,
1839 1284 : 100_000,
1840 1284 : 200_000,
1841 1284 : 400_000,
1842 1284 : 600_000,
1843 1284 : 800_000,
1844 1284 : 1_000_000, // 1s
1845 1284 : 2_000_000,
1846 1284 : 4_000_000,
1847 1284 : 6_000_000,
1848 1284 : 8_000_000,
1849 1284 : 10_000_000, // 10s
1850 1284 : 20_000_000,
1851 1284 : 50_000_000,
1852 1284 : 100_000_000,
1853 1284 : 200_000_000,
1854 1284 : 1_000_000_000, // 1000s
1855 1284 : ]
1856 1284 : .into_iter()
1857 1284 : .map(Duration::from_micros)
1858 52644 : .map(|d| d.as_secs_f64())
1859 1284 : .collect()
1860 1284 : });
1861 :
1862 1284 : static SMGR_QUERY_TIME_GLOBAL: Lazy<HistogramVec> = Lazy::new(|| {
1863 1284 : register_histogram_vec!(
1864 1284 : "pageserver_smgr_query_seconds_global",
1865 1284 : "Like pageserver_smgr_query_seconds, but aggregated to instance level.",
1866 1284 : &["smgr_query_type"],
1867 1284 : SMGR_QUERY_TIME_GLOBAL_BUCKETS.clone(),
1868 1284 : )
1869 1284 : .expect("failed to define a metric")
1870 1284 : });
1871 :
1872 1284 : static PAGE_SERVICE_BATCH_SIZE_BUCKETS_GLOBAL: Lazy<Vec<f64>> = Lazy::new(|| {
1873 1284 : (1..=u32::try_from(Timeline::MAX_GET_VECTORED_KEYS).unwrap())
1874 41088 : .map(|v| v.into())
1875 1284 : .collect()
1876 1284 : });
1877 :
1878 1284 : static PAGE_SERVICE_BATCH_SIZE_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
1879 1284 : register_histogram!(
1880 1284 : "pageserver_page_service_batch_size_global",
1881 1284 : "Batch size of pageserver page service requests",
1882 1284 : PAGE_SERVICE_BATCH_SIZE_BUCKETS_GLOBAL.clone(),
1883 1284 : )
1884 1284 : .expect("failed to define a metric")
1885 1284 : });
1886 :
1887 1284 : static PAGE_SERVICE_BATCH_SIZE_BUCKETS_PER_TIMELINE: Lazy<Vec<f64>> = Lazy::new(|| {
1888 1284 : let mut buckets = Vec::new();
1889 8988 : for i in 0.. {
1890 8988 : let bucket = 1 << i;
1891 8988 : if bucket > u32::try_from(Timeline::MAX_GET_VECTORED_KEYS).unwrap() {
1892 1284 : break;
1893 7704 : }
1894 7704 : buckets.push(bucket.into());
1895 : }
1896 1284 : buckets
1897 1284 : });
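// For illustration: the loop above yields the power-of-two buckets
// [1.0, 2.0, 4.0, 8.0, 16.0, 32.0], assuming Timeline::MAX_GET_VECTORED_KEYS
// is 32 (the constant is defined elsewhere in the crate).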
1898 :
1899 1284 : static PAGE_SERVICE_BATCH_SIZE_PER_TENANT_TIMELINE: Lazy<HistogramVec> = Lazy::new(|| {
1900 1284 : register_histogram_vec!(
1901 1284 : "pageserver_page_service_batch_size",
1902 1284 : "Batch size of pageserver page service requests",
1903 1284 : &["tenant_id", "shard_id", "timeline_id"],
1904 1284 : PAGE_SERVICE_BATCH_SIZE_BUCKETS_PER_TIMELINE.clone()
1905 1284 : )
1906 1284 : .expect("failed to define a metric")
1907 1284 : });
1908 :
1909 1284 : static PAGE_SERVICE_BATCH_BREAK_REASON_GLOBAL: Lazy<IntCounterVec> = Lazy::new(|| {
1910 1284 : register_int_counter_vec!(
1911 1284 :         // it's a counter, but the name is prepared to extend it to a histogram of queue depth
1912 1284 : "pageserver_page_service_batch_break_reason_global",
1913 1284 : "Reason for breaking batches of get page requests",
1914 1284 : &["reason"],
1915 1284 : )
1916 1284 : .expect("failed to define a metric")
1917 1284 : });
1918 :
1919 : struct GetPageBatchBreakReasonTimelineMetrics {
1920 : map: EnumMap<GetPageBatchBreakReason, IntCounter>,
1921 : }
1922 :
1923 : impl GetPageBatchBreakReasonTimelineMetrics {
1924 2796 : fn new(tenant_id: &str, shard_slug: &str, timeline_id: &str) -> Self {
1925 2796 : GetPageBatchBreakReasonTimelineMetrics {
1926 19572 : map: EnumMap::from_array(std::array::from_fn(|reason_idx| {
1927 19572 : let reason = GetPageBatchBreakReason::from_usize(reason_idx);
1928 19572 : PAGE_SERVICE_BATCH_BREAK_REASON_PER_TENANT_TIMELINE.with_label_values(&[
1929 19572 : tenant_id,
1930 19572 : shard_slug,
1931 19572 : timeline_id,
1932 19572 : reason.into(),
1933 19572 : ])
1934 19572 : })),
1935 2796 : }
1936 2796 : }
1937 :
1938 0 : fn inc(&self, reason: GetPageBatchBreakReason) {
1939 0 : self.map[reason].inc()
1940 0 : }
1941 : }
1942 :
1943 1284 : static PAGE_SERVICE_BATCH_BREAK_REASON_PER_TENANT_TIMELINE: Lazy<IntCounterVec> = Lazy::new(|| {
1944 1284 : register_int_counter_vec!(
1945 1284 : "pageserver_page_service_batch_break_reason",
1946 1284 : "Reason for breaking batches of get page requests",
1947 1284 : &["tenant_id", "shard_id", "timeline_id", "reason"],
1948 1284 : )
1949 1284 : .expect("failed to define a metric")
1950 1284 : });
1951 :
1952 0 : pub(crate) static PAGE_SERVICE_CONFIG_MAX_BATCH_SIZE: Lazy<IntGaugeVec> = Lazy::new(|| {
1953 0 : register_int_gauge_vec!(
1954 0 : "pageserver_page_service_config_max_batch_size",
1955 0 : "Configured maximum batch size for the server-side batching functionality of page_service. \
1956 0 : Labels expose more of the configuration parameters.",
1957 0 : &["mode", "execution", "batching"]
1958 0 : )
1959 0 : .expect("failed to define a metric")
1960 0 : });
1961 :
1962 0 : fn set_page_service_config_max_batch_size(conf: &PageServicePipeliningConfig) {
1963 0 : PAGE_SERVICE_CONFIG_MAX_BATCH_SIZE.reset();
1964 0 : let (label_values, value) = match conf {
1965 0 : PageServicePipeliningConfig::Serial => (["serial", "-", "-"], 1),
1966 : PageServicePipeliningConfig::Pipelined(PageServicePipeliningConfigPipelined {
1967 0 : max_batch_size,
1968 0 : execution,
1969 0 : batching,
1970 0 : }) => {
1971 0 : let mode = "pipelined";
1972 0 : let execution = match execution {
1973 : PageServiceProtocolPipelinedExecutionStrategy::ConcurrentFutures => {
1974 0 : "concurrent-futures"
1975 : }
1976 0 : PageServiceProtocolPipelinedExecutionStrategy::Tasks => "tasks",
1977 : };
1978 0 : let batching = match batching {
1979 0 : PageServiceProtocolPipelinedBatchingStrategy::UniformLsn => "uniform-lsn",
1980 0 : PageServiceProtocolPipelinedBatchingStrategy::ScatteredLsn => "scattered-lsn",
1981 : };
1982 :
1983 0 : ([mode, execution, batching], max_batch_size.get())
1984 : }
1985 : };
1986 0 : PAGE_SERVICE_CONFIG_MAX_BATCH_SIZE
1987 0 : .with_label_values(&label_values)
1988 0 : .set(value.try_into().unwrap());
1989 0 : }
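// Example of the single series this exports, assuming a hypothetical pipelined
// config with max_batch_size=32, concurrent-futures execution, and uniform-lsn
// batching:
//
//     pageserver_page_service_config_max_batch_size{mode="pipelined",execution="concurrent-futures",batching="uniform-lsn"} 32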
1990 :
1991 1284 : static PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS: Lazy<IntCounterVec> = Lazy::new(|| {
1992 1284 : register_int_counter_vec!(
1993 1284 : "pageserver_page_service_pagestream_flush_in_progress_micros",
1994 1284 : "Counter that sums up the microseconds that a pagestream response was being flushed into the TCP connection. \
1995 1284 : If the flush is particularly slow, this counter will be updated periodically to make slow flushes \
1996 1284 : easily discoverable in monitoring. \
1997 1284 :          Hence, this is NOT a completion latency histogram.",
1998 1284 : &["tenant_id", "shard_id", "timeline_id"],
1999 1284 : )
2000 1284 : .expect("failed to define a metric")
2001 1284 : });
2002 :
2003 1284 : static PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS_GLOBAL: Lazy<IntCounter> = Lazy::new(|| {
2004 1284 : register_int_counter!(
2005 1284 : "pageserver_page_service_pagestream_flush_in_progress_micros_global",
2006 1284 :         "Like pageserver_page_service_pagestream_flush_in_progress_micros, but instance-wide.",
2007 1284 : )
2008 1284 : .expect("failed to define a metric")
2009 1284 : });
2010 :
2011 1284 : static PAGE_SERVICE_SMGR_BATCH_WAIT_TIME: Lazy<HistogramVec> = Lazy::new(|| {
2012 1284 : register_histogram_vec!(
2013 1284 : "pageserver_page_service_pagestream_batch_wait_time_seconds",
2014 1284 : "Time a request spent waiting in its batch until the batch moved to throttle&execution.",
2015 1284 : &["tenant_id", "shard_id", "timeline_id"],
2016 1284 : SMGR_QUERY_TIME_PER_TENANT_TIMELINE_BUCKETS.into(),
2017 1284 : )
2018 1284 : .expect("failed to define a metric")
2019 1284 : });
2020 :
2021 1284 : static PAGE_SERVICE_SMGR_BATCH_WAIT_TIME_GLOBAL: Lazy<Histogram> = Lazy::new(|| {
2022 1284 : register_histogram!(
2023 1284 : "pageserver_page_service_pagestream_batch_wait_time_seconds_global",
2024 1284 : "Like pageserver_page_service_pagestream_batch_wait_time_seconds, but aggregated to instance level.",
2025 1284 : SMGR_QUERY_TIME_GLOBAL_BUCKETS.to_vec(),
2026 1284 : )
2027 1284 : .expect("failed to define a metric")
2028 1284 : });
2029 :
2030 : impl SmgrQueryTimePerTimeline {
2031 2796 : pub(crate) fn new(
2032 2796 : tenant_shard_id: &TenantShardId,
2033 2796 : timeline_id: &TimelineId,
2034 2796 : pagestream_throttle_metrics: Arc<tenant_throttling::Pagestream>,
2035 2796 : ) -> Self {
2036 2796 : let tenant_id = tenant_shard_id.tenant_id.to_string();
2037 2796 : let shard_slug = format!("{}", tenant_shard_id.shard_slug());
2038 2796 : let timeline_id = timeline_id.to_string();
2039 16776 : let global_started = std::array::from_fn(|i| {
2040 16776 : let op = SmgrQueryType::from_repr(i).unwrap();
2041 16776 : SMGR_QUERY_STARTED_GLOBAL
2042 16776 : .get_metric_with_label_values(&[op.into()])
2043 16776 : .unwrap()
2044 16776 : });
2045 16776 : let global_latency = std::array::from_fn(|i| {
2046 16776 : let op = SmgrQueryType::from_repr(i).unwrap();
2047 16776 : SMGR_QUERY_TIME_GLOBAL
2048 16776 : .get_metric_with_label_values(&[op.into()])
2049 16776 : .unwrap()
2050 16776 : });
2051 2796 :
2052 2796 : let per_timeline_getpage_started = SMGR_QUERY_STARTED_PER_TENANT_TIMELINE
2053 2796 : .get_metric_with_label_values(&[
2054 2796 : SmgrQueryType::GetPageAtLsn.into(),
2055 2796 : &tenant_id,
2056 2796 : &shard_slug,
2057 2796 : &timeline_id,
2058 2796 : ])
2059 2796 : .unwrap();
2060 2796 : let per_timeline_getpage_latency = SMGR_QUERY_TIME_PER_TENANT_TIMELINE
2061 2796 : .get_metric_with_label_values(&[
2062 2796 : SmgrQueryType::GetPageAtLsn.into(),
2063 2796 : &tenant_id,
2064 2796 : &shard_slug,
2065 2796 : &timeline_id,
2066 2796 : ])
2067 2796 : .unwrap();
2068 2796 :
2069 2796 : let global_batch_size = PAGE_SERVICE_BATCH_SIZE_GLOBAL.clone();
2070 2796 : let per_timeline_batch_size = PAGE_SERVICE_BATCH_SIZE_PER_TENANT_TIMELINE
2071 2796 : .get_metric_with_label_values(&[&tenant_id, &shard_slug, &timeline_id])
2072 2796 : .unwrap();
2073 2796 :
2074 2796 : let global_batch_wait_time = PAGE_SERVICE_SMGR_BATCH_WAIT_TIME_GLOBAL.clone();
2075 2796 : let per_timeline_batch_wait_time = PAGE_SERVICE_SMGR_BATCH_WAIT_TIME
2076 2796 : .get_metric_with_label_values(&[&tenant_id, &shard_slug, &timeline_id])
2077 2796 : .unwrap();
2078 2796 :
2079 19572 : let global_batch_break_reason = std::array::from_fn(|i| {
2080 19572 : let reason = GetPageBatchBreakReason::from_usize(i);
2081 19572 : PAGE_SERVICE_BATCH_BREAK_REASON_GLOBAL
2082 19572 : .get_metric_with_label_values(&[reason.into()])
2083 19572 : .unwrap()
2084 19572 : });
2085 2796 : let per_timeline_batch_break_reason =
2086 2796 : GetPageBatchBreakReasonTimelineMetrics::new(&tenant_id, &shard_slug, &timeline_id);
2087 2796 :
2088 2796 : let global_flush_in_progress_micros =
2089 2796 : PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS_GLOBAL.clone();
2090 2796 : let per_timeline_flush_in_progress_micros = PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS
2091 2796 : .get_metric_with_label_values(&[&tenant_id, &shard_slug, &timeline_id])
2092 2796 : .unwrap();
2093 2796 :
2094 2796 : Self {
2095 2796 : global_started,
2096 2796 : global_latency,
2097 2796 : per_timeline_getpage_latency,
2098 2796 : per_timeline_getpage_started,
2099 2796 : global_batch_size,
2100 2796 : per_timeline_batch_size,
2101 2796 : global_flush_in_progress_micros,
2102 2796 : per_timeline_flush_in_progress_micros,
2103 2796 : global_batch_wait_time,
2104 2796 : per_timeline_batch_wait_time,
2105 2796 : global_batch_break_reason,
2106 2796 : per_timeline_batch_break_reason,
2107 2796 : throttling: pagestream_throttle_metrics,
2108 2796 : }
2109 2796 : }
2110 0 : pub(crate) fn start_smgr_op(&self, op: SmgrQueryType, received_at: Instant) -> SmgrOpTimer {
2111 0 : self.global_started[op as usize].inc();
2112 :
2113 0 : let per_timeline_latency_histo = if matches!(op, SmgrQueryType::GetPageAtLsn) {
2114 0 : self.per_timeline_getpage_started.inc();
2115 0 : Some(self.per_timeline_getpage_latency.clone())
2116 : } else {
2117 0 : None
2118 : };
2119 :
2120 0 : SmgrOpTimer(Some(SmgrOpTimerInner {
2121 0 : global_execution_latency_histo: self.global_latency[op as usize].clone(),
2122 0 : per_timeline_execution_latency_histo: per_timeline_latency_histo,
2123 0 : global_flush_in_progress_micros: self.global_flush_in_progress_micros.clone(),
2124 0 : per_timeline_flush_in_progress_micros: self
2125 0 : .per_timeline_flush_in_progress_micros
2126 0 : .clone(),
2127 0 : global_batch_wait_time: self.global_batch_wait_time.clone(),
2128 0 : per_timeline_batch_wait_time: self.per_timeline_batch_wait_time.clone(),
2129 0 : throttling: self.throttling.clone(),
2130 0 : timings: SmgrOpTimerState::Received { received_at },
2131 0 : }))
2132 0 : }
2133 :
2134 : /// TODO: do something about this? seems odd, we have a similar call on SmgrOpTimer
2135 0 : pub(crate) fn observe_getpage_batch_start(
2136 0 : &self,
2137 0 : batch_size: usize,
2138 0 : break_reason: GetPageBatchBreakReason,
2139 0 : ) {
2140 0 : self.global_batch_size.observe(batch_size as f64);
2141 0 : self.per_timeline_batch_size.observe(batch_size as f64);
2142 0 :
2143 0 : self.global_batch_break_reason[break_reason.into_usize()].inc();
2144 0 : self.per_timeline_batch_break_reason.inc(break_reason);
2145 0 : }
2146 : }
2147 :
2148 : // keep in sync with control plane Go code so that we can validate
2149 : // compute's basebackup_ms metric with our perspective in the context of SLI/SLO.
2150 0 : static COMPUTE_STARTUP_BUCKETS: Lazy<[f64; 28]> = Lazy::new(|| {
2151 0 : // Go code uses milliseconds. Variable is called `computeStartupBuckets`
2152 0 : [
2153 0 : 5, 10, 20, 30, 50, 70, 100, 120, 150, 200, 250, 300, 350, 400, 450, 500, 600, 800, 1000,
2154 0 : 1500, 2000, 2500, 3000, 5000, 10000, 20000, 40000, 60000,
2155 0 : ]
2156 0 : .map(|ms| (ms as f64) / 1000.0)
2157 0 : });
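// Worked conversion for the first and last Go buckets above:
// 5 ms / 1000 = 0.005 s and 60000 ms / 1000 = 60.0 s, i.e. the histogram
// spans 5 ms to 60 s once converted to seconds.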
2158 :
2159 : pub(crate) struct BasebackupQueryTime {
2160 : ok: Histogram,
2161 : error: Histogram,
2162 : client_error: Histogram,
2163 : }
2164 :
2165 0 : pub(crate) static BASEBACKUP_QUERY_TIME: Lazy<BasebackupQueryTime> = Lazy::new(|| {
2166 0 : let vec = register_histogram_vec!(
2167 0 : "pageserver_basebackup_query_seconds",
2168 0 : "Histogram of basebackup queries durations, by result type",
2169 0 : &["result"],
2170 0 : COMPUTE_STARTUP_BUCKETS.to_vec(),
2171 0 : )
2172 0 : .expect("failed to define a metric");
2173 0 : BasebackupQueryTime {
2174 0 : ok: vec.get_metric_with_label_values(&["ok"]).unwrap(),
2175 0 : error: vec.get_metric_with_label_values(&["error"]).unwrap(),
2176 0 : client_error: vec.get_metric_with_label_values(&["client_error"]).unwrap(),
2177 0 : }
2178 0 : });
2179 :
2180 : pub(crate) struct BasebackupQueryTimeOngoingRecording<'a> {
2181 : parent: &'a BasebackupQueryTime,
2182 : start: std::time::Instant,
2183 : }
2184 :
2185 : impl BasebackupQueryTime {
2186 0 : pub(crate) fn start_recording(&self) -> BasebackupQueryTimeOngoingRecording<'_> {
2187 0 : let start = Instant::now();
2188 0 : BasebackupQueryTimeOngoingRecording {
2189 0 : parent: self,
2190 0 : start,
2191 0 : }
2192 0 : }
2193 : }
2194 :
2195 : impl BasebackupQueryTimeOngoingRecording<'_> {
2196 0 : pub(crate) fn observe<T>(self, res: &Result<T, QueryError>) {
2197 0 : let elapsed = self.start.elapsed().as_secs_f64();
2198 :         // If you want to change the categorization of a specific error, also change it in `log_query_error`.
2199 0 : let metric = match res {
2200 0 : Ok(_) => &self.parent.ok,
2201 : Err(QueryError::Shutdown) => {
2202 : // Do not observe ok/err for shutdown
2203 0 : return;
2204 : }
2205 0 : Err(QueryError::Disconnected(ConnectionError::Io(io_error)))
2206 0 : if is_expected_io_error(io_error) =>
2207 0 : {
2208 0 : &self.parent.client_error
2209 : }
2210 0 : Err(_) => &self.parent.error,
2211 : };
2212 0 : metric.observe(elapsed);
2213 0 : }
2214 : }
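// Hedged usage sketch (the request handler is hypothetical):
//
//     let recording = BASEBACKUP_QUERY_TIME.start_recording();
//     let res = handle_basebackup_request(/* ... */).await;
//     recording.observe(&res); // routes to ok/error/client_error; Shutdown is not observed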
2215 :
2216 0 : pub(crate) static LIVE_CONNECTIONS: Lazy<IntCounterPairVec> = Lazy::new(|| {
2217 0 : register_int_counter_pair_vec!(
2218 0 : "pageserver_live_connections_started",
2219 0 : "Number of network connections that we started handling",
2220 0 : "pageserver_live_connections_finished",
2221 0 : "Number of network connections that we finished handling",
2222 0 : &["pageserver_connection_kind"]
2223 0 : )
2224 0 : .expect("failed to define a metric")
2225 0 : });
2226 :
2227 : #[derive(Clone, Copy, enum_map::Enum, IntoStaticStr)]
2228 : pub(crate) enum ComputeCommandKind {
2229 : PageStreamV3,
2230 : PageStreamV2,
2231 : Basebackup,
2232 : Fullbackup,
2233 : LeaseLsn,
2234 : }
2235 :
2236 : pub(crate) struct ComputeCommandCounters {
2237 : map: EnumMap<ComputeCommandKind, IntCounter>,
2238 : }
2239 :
2240 0 : pub(crate) static COMPUTE_COMMANDS_COUNTERS: Lazy<ComputeCommandCounters> = Lazy::new(|| {
2241 0 : let inner = register_int_counter_vec!(
2242 0 : "pageserver_compute_commands",
2243 0 : "Number of compute -> pageserver commands processed",
2244 0 : &["command"]
2245 0 : )
2246 0 : .expect("failed to define a metric");
2247 0 :
2248 0 : ComputeCommandCounters {
2249 0 : map: EnumMap::from_array(std::array::from_fn(|i| {
2250 0 : let command = ComputeCommandKind::from_usize(i);
2251 0 : let command_str: &'static str = command.into();
2252 0 : inner.with_label_values(&[command_str])
2253 0 : })),
2254 0 : }
2255 0 : });
2256 :
2257 : impl ComputeCommandCounters {
2258 0 : pub(crate) fn for_command(&self, command: ComputeCommandKind) -> &IntCounter {
2259 0 : &self.map[command]
2260 0 : }
2261 : }
2262 :
2263 : // remote storage metrics
2264 :
2265 1260 : static REMOTE_TIMELINE_CLIENT_CALLS: Lazy<IntCounterPairVec> = Lazy::new(|| {
2266 1260 : register_int_counter_pair_vec!(
2267 1260 : "pageserver_remote_timeline_client_calls_started",
2268 1260 : "Number of started calls to remote timeline client.",
2269 1260 : "pageserver_remote_timeline_client_calls_finished",
2270 1260 :         "Number of finished calls to remote timeline client.",
2271 1260 : &[
2272 1260 : "tenant_id",
2273 1260 : "shard_id",
2274 1260 : "timeline_id",
2275 1260 : "file_kind",
2276 1260 : "op_kind"
2277 1260 : ],
2278 1260 : )
2279 1260 : .unwrap()
2280 1260 : });
2281 :
2282 : static REMOTE_TIMELINE_CLIENT_BYTES_STARTED_COUNTER: Lazy<IntCounterVec> =
2283 1248 : Lazy::new(|| {
2284 1248 : register_int_counter_vec!(
2285 1248 : "pageserver_remote_timeline_client_bytes_started",
2286 1248 : "Incremented by the number of bytes associated with a remote timeline client operation. \
2287 1248 : The increment happens when the operation is scheduled.",
2288 1248 : &["tenant_id", "shard_id", "timeline_id", "file_kind", "op_kind"],
2289 1248 : )
2290 1248 : .expect("failed to define a metric")
2291 1248 : });
2292 :
2293 1248 : static REMOTE_TIMELINE_CLIENT_BYTES_FINISHED_COUNTER: Lazy<IntCounterVec> = Lazy::new(|| {
2294 1248 : register_int_counter_vec!(
2295 1248 : "pageserver_remote_timeline_client_bytes_finished",
2296 1248 : "Incremented by the number of bytes associated with a remote timeline client operation. \
2297 1248 : The increment happens when the operation finishes (regardless of success/failure/shutdown).",
2298 1248 : &["tenant_id", "shard_id", "timeline_id", "file_kind", "op_kind"],
2299 1248 : )
2300 1248 : .expect("failed to define a metric")
2301 1248 : });
2302 :
2303 : pub(crate) struct TenantManagerMetrics {
2304 : tenant_slots_attached: UIntGauge,
2305 : tenant_slots_secondary: UIntGauge,
2306 : tenant_slots_inprogress: UIntGauge,
2307 : pub(crate) tenant_slot_writes: IntCounter,
2308 : pub(crate) unexpected_errors: IntCounter,
2309 : }
2310 :
2311 : impl TenantManagerMetrics {
2312 : /// Helpers for tracking slots. Note that these do not track the lifetime of TenantSlot objects
2313 : /// exactly: they track the lifetime of the slots _in the tenant map_.
2314 12 : pub(crate) fn slot_inserted(&self, slot: &TenantSlot) {
2315 12 : match slot {
2316 0 : TenantSlot::Attached(_) => {
2317 0 : self.tenant_slots_attached.inc();
2318 0 : }
2319 0 : TenantSlot::Secondary(_) => {
2320 0 : self.tenant_slots_secondary.inc();
2321 0 : }
2322 12 : TenantSlot::InProgress(_) => {
2323 12 : self.tenant_slots_inprogress.inc();
2324 12 : }
2325 : }
2326 12 : }
2327 :
2328 12 : pub(crate) fn slot_removed(&self, slot: &TenantSlot) {
2329 12 : match slot {
2330 12 : TenantSlot::Attached(_) => {
2331 12 : self.tenant_slots_attached.dec();
2332 12 : }
2333 0 : TenantSlot::Secondary(_) => {
2334 0 : self.tenant_slots_secondary.dec();
2335 0 : }
2336 0 : TenantSlot::InProgress(_) => {
2337 0 : self.tenant_slots_inprogress.dec();
2338 0 : }
2339 : }
2340 12 : }
2341 :
2342 : #[cfg(all(debug_assertions, not(test)))]
2343 0 : pub(crate) fn slots_total(&self) -> u64 {
2344 0 : self.tenant_slots_attached.get()
2345 0 : + self.tenant_slots_secondary.get()
2346 0 : + self.tenant_slots_inprogress.get()
2347 0 : }
2348 : }
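// Sketch of the intended pairing (hypothetical call site): a mutation of a
// slot in the tenant map reports both the outgoing and the incoming value so
// the per-mode gauges stay balanced.
//
//     TENANT_MANAGER.slot_removed(&old_slot);
//     TENANT_MANAGER.slot_inserted(&new_slot);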
2349 :
2350 12 : pub(crate) static TENANT_MANAGER: Lazy<TenantManagerMetrics> = Lazy::new(|| {
2351 12 : let tenant_slots = register_uint_gauge_vec!(
2352 12 : "pageserver_tenant_manager_slots",
2353 12 : "How many slots currently exist, including all attached, secondary and in-progress operations",
2354 12 : &["mode"]
2355 12 : )
2356 12 : .expect("failed to define a metric");
2357 12 : TenantManagerMetrics {
2358 12 : tenant_slots_attached: tenant_slots
2359 12 : .get_metric_with_label_values(&["attached"])
2360 12 : .unwrap(),
2361 12 : tenant_slots_secondary: tenant_slots
2362 12 : .get_metric_with_label_values(&["secondary"])
2363 12 : .unwrap(),
2364 12 : tenant_slots_inprogress: tenant_slots
2365 12 : .get_metric_with_label_values(&["inprogress"])
2366 12 : .unwrap(),
2367 12 : tenant_slot_writes: register_int_counter!(
2368 12 : "pageserver_tenant_manager_slot_writes",
2369 12 : "Writes to a tenant slot, including all of create/attach/detach/delete"
2370 12 : )
2371 12 : .expect("failed to define a metric"),
2372 12 : unexpected_errors: register_int_counter!(
2373 12 : "pageserver_tenant_manager_unexpected_errors_total",
2374 12 : "Number of unexpected conditions encountered: nonzero value indicates a non-fatal bug."
2375 12 : )
2376 12 : .expect("failed to define a metric"),
2377 12 : }
2378 12 : });
2379 :
2380 : pub(crate) struct DeletionQueueMetrics {
2381 : pub(crate) keys_submitted: IntCounter,
2382 : pub(crate) keys_dropped: IntCounter,
2383 : pub(crate) keys_executed: IntCounter,
2384 : pub(crate) keys_validated: IntCounter,
2385 : pub(crate) dropped_lsn_updates: IntCounter,
2386 : pub(crate) unexpected_errors: IntCounter,
2387 : pub(crate) remote_errors: IntCounterVec,
2388 : }
2389 197 : pub(crate) static DELETION_QUEUE: Lazy<DeletionQueueMetrics> = Lazy::new(|| {
2390 197 : DeletionQueueMetrics{
2391 197 :
2392 197 : keys_submitted: register_int_counter!(
2393 197 : "pageserver_deletion_queue_submitted_total",
2394 197 : "Number of objects submitted for deletion"
2395 197 : )
2396 197 : .expect("failed to define a metric"),
2397 197 :
2398 197 : keys_dropped: register_int_counter!(
2399 197 : "pageserver_deletion_queue_dropped_total",
2400 197 : "Number of object deletions dropped due to stale generation."
2401 197 : )
2402 197 : .expect("failed to define a metric"),
2403 197 :
2404 197 : keys_executed: register_int_counter!(
2405 197 : "pageserver_deletion_queue_executed_total",
2406 197 :         "Number of objects deleted. Only includes objects that we actually deleted; sum with pageserver_deletion_queue_dropped_total for the total number of keys processed to completion"
2407 197 : )
2408 197 : .expect("failed to define a metric"),
2409 197 :
2410 197 : keys_validated: register_int_counter!(
2411 197 : "pageserver_deletion_queue_validated_total",
2412 197 : "Number of keys validated for deletion. Sum with pageserver_deletion_queue_dropped_total for the total number of keys that have passed through the validation stage."
2413 197 : )
2414 197 : .expect("failed to define a metric"),
2415 197 :
2416 197 : dropped_lsn_updates: register_int_counter!(
2417 197 : "pageserver_deletion_queue_dropped_lsn_updates_total",
2418 197 : "Updates to remote_consistent_lsn dropped due to stale generation number."
2419 197 : )
2420 197 : .expect("failed to define a metric"),
2421 197 : unexpected_errors: register_int_counter!(
2422 197 : "pageserver_deletion_queue_unexpected_errors_total",
2423 197 :         "Number of unexpected conditions that may stall the queue: any value above zero is unexpected."
2424 197 : )
2425 197 : .expect("failed to define a metric"),
2426 197 : remote_errors: register_int_counter_vec!(
2427 197 : "pageserver_deletion_queue_remote_errors_total",
2428 197 : "Retryable remote I/O errors while executing deletions, for example 503 responses to DeleteObjects",
2429 197 : &["op_kind"],
2430 197 : )
2431 197 : .expect("failed to define a metric")
2432 197 : }
2433 197 : });
2434 :
2435 : pub(crate) struct SecondaryModeMetrics {
2436 : pub(crate) upload_heatmap: IntCounter,
2437 : pub(crate) upload_heatmap_errors: IntCounter,
2438 : pub(crate) upload_heatmap_duration: Histogram,
2439 : pub(crate) download_heatmap: IntCounter,
2440 : pub(crate) download_layer: IntCounter,
2441 : }
2442 0 : pub(crate) static SECONDARY_MODE: Lazy<SecondaryModeMetrics> = Lazy::new(|| {
2443 0 : SecondaryModeMetrics {
2444 0 : upload_heatmap: register_int_counter!(
2445 0 : "pageserver_secondary_upload_heatmap",
2446 0 : "Number of heatmaps written to remote storage by attached tenants"
2447 0 : )
2448 0 : .expect("failed to define a metric"),
2449 0 : upload_heatmap_errors: register_int_counter!(
2450 0 : "pageserver_secondary_upload_heatmap_errors",
2451 0 : "Failures writing heatmap to remote storage"
2452 0 : )
2453 0 : .expect("failed to define a metric"),
2454 0 : upload_heatmap_duration: register_histogram!(
2455 0 : "pageserver_secondary_upload_heatmap_duration",
2456 0 : "Time to build and upload a heatmap, including any waiting inside the remote storage client"
2457 0 : )
2458 0 : .expect("failed to define a metric"),
2459 0 : download_heatmap: register_int_counter!(
2460 0 : "pageserver_secondary_download_heatmap",
2461 0 : "Number of downloads of heatmaps by secondary mode locations, including when it hasn't changed"
2462 0 : )
2463 0 : .expect("failed to define a metric"),
2464 0 : download_layer: register_int_counter!(
2465 0 : "pageserver_secondary_download_layer",
2466 0 : "Number of downloads of layers by secondary mode locations"
2467 0 : )
2468 0 : .expect("failed to define a metric"),
2469 0 : }
2470 0 : });
2471 :
2472 0 : pub(crate) static SECONDARY_RESIDENT_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
2473 0 : register_uint_gauge_vec!(
2474 0 : "pageserver_secondary_resident_physical_size",
2475 0 : "The size of the layer files present in the pageserver's filesystem, for secondary locations.",
2476 0 : &["tenant_id", "shard_id"]
2477 0 : )
2478 0 : .expect("failed to define a metric")
2479 0 : });
2480 :
2481 0 : pub(crate) static NODE_UTILIZATION_SCORE: Lazy<UIntGauge> = Lazy::new(|| {
2482 0 : register_uint_gauge!(
2483 0 : "pageserver_utilization_score",
2484 0 : "The utilization score we report to the storage controller for scheduling, where 0 is empty, 1000000 is full, and anything above is considered overloaded",
2485 0 : )
2486 0 : .expect("failed to define a metric")
2487 0 : });
2488 :
2489 0 : pub(crate) static SECONDARY_HEATMAP_TOTAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
2490 0 : register_uint_gauge_vec!(
2491 0 : "pageserver_secondary_heatmap_total_size",
2492 0 : "The total size in bytes of all layers in the most recently downloaded heatmap.",
2493 0 : &["tenant_id", "shard_id"]
2494 0 : )
2495 0 : .expect("failed to define a metric")
2496 0 : });
2497 :
2498 : #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
2499 : pub enum RemoteOpKind {
2500 : Upload,
2501 : Download,
2502 : Delete,
2503 : }
2504 : impl RemoteOpKind {
2505 92718 : pub fn as_str(&self) -> &'static str {
2506 92718 : match self {
2507 87331 : Self::Upload => "upload",
2508 408 : Self::Download => "download",
2509 4979 : Self::Delete => "delete",
2510 : }
2511 92718 : }
2512 : }
2513 :
2514 : #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
2515 : pub enum RemoteOpFileKind {
2516 : Layer,
2517 : Index,
2518 : }
2519 : impl RemoteOpFileKind {
2520 92718 : pub fn as_str(&self) -> &'static str {
2521 92718 : match self {
2522 65222 : Self::Layer => "layer",
2523 27496 : Self::Index => "index",
2524 : }
2525 92718 : }
2526 : }
2527 :
2528 1239 : pub(crate) static REMOTE_TIMELINE_CLIENT_COMPLETION_LATENCY: Lazy<HistogramVec> = Lazy::new(|| {
2529 1239 : register_histogram_vec!(
2530 1239 : "pageserver_remote_timeline_client_seconds_global",
2531 1239 : "Time spent on remote timeline client operations. \
2532 1239 :          Grouped by task_kind, file_kind, op_kind and status. \
2533 1239 : The task_kind is \
2534 1239 : - for layer downloads, populated from RequestContext (primary objective of having the label) \
2535 1239 : - for index downloads, set to 'unknown' \
2536 1239 : - for any upload operation, set to 'RemoteUploadTask' \
2537 1239 : This keeps dimensionality at bay. \
2538 1239 : Does not account for time spent waiting in remote timeline client's queues.",
2539 1239 : &["task_kind", "file_kind", "op_kind", "status"]
2540 1239 : )
2541 1239 : .expect("failed to define a metric")
2542 1239 : });
2543 :
2544 0 : pub(crate) static TENANT_TASK_EVENTS: Lazy<IntCounterVec> = Lazy::new(|| {
2545 0 : register_int_counter_vec!(
2546 0 : "pageserver_tenant_task_events",
2547 0 : "Number of task start/stop/fail events.",
2548 0 : &["event"],
2549 0 : )
2550 0 : .expect("Failed to register tenant_task_events metric")
2551 0 : });
2552 :
2553 : pub struct BackgroundLoopSemaphoreMetrics {
2554 : counters: EnumMap<BackgroundLoopKind, IntCounterPair>,
2555 : durations: EnumMap<BackgroundLoopKind, Histogram>,
2556 : waiting_tasks: EnumMap<BackgroundLoopKind, IntGauge>,
2557 : running_tasks: EnumMap<BackgroundLoopKind, IntGauge>,
2558 : }
2559 :
2560 : pub(crate) static BACKGROUND_LOOP_SEMAPHORE: Lazy<BackgroundLoopSemaphoreMetrics> =
2561 120 : Lazy::new(|| {
2562 120 : let counters = register_int_counter_pair_vec!(
2563 120 : "pageserver_background_loop_semaphore_wait_start_count",
2564 120 : "Counter for background loop concurrency-limiting semaphore acquire calls started",
2565 120 : "pageserver_background_loop_semaphore_wait_finish_count",
2566 120 : "Counter for background loop concurrency-limiting semaphore acquire calls finished",
2567 120 : &["task"],
2568 120 : )
2569 120 : .unwrap();
2570 120 :
2571 120 : let durations = register_histogram_vec!(
2572 120 : "pageserver_background_loop_semaphore_wait_seconds",
2573 120 : "Seconds spent waiting on background loop semaphore acquisition",
2574 120 : &["task"],
2575 120 : vec![0.01, 1.0, 5.0, 10.0, 30.0, 60.0, 180.0, 300.0, 600.0],
2576 120 : )
2577 120 : .unwrap();
2578 120 :
2579 120 : let waiting_tasks = register_int_gauge_vec!(
2580 120 : "pageserver_background_loop_semaphore_waiting_tasks",
2581 120 : "Number of background loop tasks waiting for semaphore",
2582 120 : &["task"],
2583 120 : )
2584 120 : .unwrap();
2585 120 :
2586 120 : let running_tasks = register_int_gauge_vec!(
2587 120 : "pageserver_background_loop_semaphore_running_tasks",
2588 120 : "Number of background loop tasks running concurrently",
2589 120 : &["task"],
2590 120 : )
2591 120 : .unwrap();
2592 120 :
2593 120 : BackgroundLoopSemaphoreMetrics {
2594 1200 : counters: EnumMap::from_array(std::array::from_fn(|i| {
2595 1200 : let kind = BackgroundLoopKind::from_usize(i);
2596 1200 : counters.with_label_values(&[kind.into()])
2597 1200 : })),
2598 1200 : durations: EnumMap::from_array(std::array::from_fn(|i| {
2599 1200 : let kind = BackgroundLoopKind::from_usize(i);
2600 1200 : durations.with_label_values(&[kind.into()])
2601 1200 : })),
2602 1200 : waiting_tasks: EnumMap::from_array(std::array::from_fn(|i| {
2603 1200 : let kind = BackgroundLoopKind::from_usize(i);
2604 1200 : waiting_tasks.with_label_values(&[kind.into()])
2605 1200 : })),
2606 1200 : running_tasks: EnumMap::from_array(std::array::from_fn(|i| {
2607 1200 : let kind = BackgroundLoopKind::from_usize(i);
2608 1200 : running_tasks.with_label_values(&[kind.into()])
2609 1200 : })),
2610 120 : }
2611 120 : });
2612 :
2613 : impl BackgroundLoopSemaphoreMetrics {
2614 : /// Starts recording semaphore metrics. Call `acquired()` on the returned recorder when the
2615 : /// semaphore is acquired, and drop it when the task completes or is cancelled.
2616 2184 : pub(crate) fn record(
2617 2184 : &self,
2618 2184 : task: BackgroundLoopKind,
2619 2184 : ) -> BackgroundLoopSemaphoreMetricsRecorder {
2620 2184 : BackgroundLoopSemaphoreMetricsRecorder::start(self, task)
2621 2184 : }
2622 : }
2623 :
2624 : /// Records metrics for a background task.
2625 : pub struct BackgroundLoopSemaphoreMetricsRecorder<'a> {
2626 : metrics: &'a BackgroundLoopSemaphoreMetrics,
2627 : task: BackgroundLoopKind,
2628 : start: Instant,
2629 : wait_counter_guard: Option<metrics::IntCounterPairGuard>,
2630 : }
2631 :
2632 : impl<'a> BackgroundLoopSemaphoreMetricsRecorder<'a> {
2633 : /// Starts recording semaphore metrics, by recording wait time and incrementing
2634 : /// `wait_start_count` and `waiting_tasks`.
2635 2184 : fn start(metrics: &'a BackgroundLoopSemaphoreMetrics, task: BackgroundLoopKind) -> Self {
2636 2184 : metrics.waiting_tasks[task].inc();
2637 2184 : Self {
2638 2184 : metrics,
2639 2184 : task,
2640 2184 : start: Instant::now(),
2641 2184 : wait_counter_guard: Some(metrics.counters[task].guard()),
2642 2184 : }
2643 2184 : }
2644 :
2645 : /// Signals that the semaphore has been acquired, and updates relevant metrics.
2646 2184 : pub fn acquired(&mut self) -> Duration {
2647 2184 : let waited = self.start.elapsed();
2648 2184 : self.wait_counter_guard.take().expect("already acquired");
2649 2184 : self.metrics.durations[self.task].observe(waited.as_secs_f64());
2650 2184 : self.metrics.waiting_tasks[self.task].dec();
2651 2184 : self.metrics.running_tasks[self.task].inc();
2652 2184 : waited
2653 2184 : }
2654 : }
2655 :
2656 : impl Drop for BackgroundLoopSemaphoreMetricsRecorder<'_> {
2657 : /// The task either completed or was cancelled.
2658 2184 : fn drop(&mut self) {
2659 2184 : if self.wait_counter_guard.take().is_some() {
2660 0 : // Waiting.
2661 0 : self.metrics.durations[self.task].observe(self.start.elapsed().as_secs_f64());
2662 0 : self.metrics.waiting_tasks[self.task].dec();
2663 2184 : } else {
2664 2184 : // Running.
2665 2184 : self.metrics.running_tasks[self.task].dec();
2666 2184 : }
2667 2184 : }
2668 : }
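// Hedged sketch of the recorder lifecycle (the semaphore and the work being
// done are assumptions for illustration):
//
//     let mut recorder = BACKGROUND_LOOP_SEMAPHORE.record(kind);
//     let _permit = semaphore.acquire().await; // concurrency-limiting semaphore
//     let _waited = recorder.acquired();       // waiting_tasks--, running_tasks++
//     do_background_work().await;
//     drop(recorder);                          // running_tasks--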
2669 :
2670 0 : pub(crate) static BACKGROUND_LOOP_PERIOD_OVERRUN_COUNT: Lazy<IntCounterVec> = Lazy::new(|| {
2671 0 : register_int_counter_vec!(
2672 0 : "pageserver_background_loop_period_overrun_count",
2673 0 : "Incremented whenever warn_when_period_overrun() logs a warning.",
2674 0 : &["task", "period"],
2675 0 : )
2676 0 : .expect("failed to define a metric")
2677 0 : });
2678 :
2679 : // walreceiver metrics
2680 :
2681 0 : pub(crate) static WALRECEIVER_STARTED_CONNECTIONS: Lazy<IntCounter> = Lazy::new(|| {
2682 0 : register_int_counter!(
2683 0 : "pageserver_walreceiver_started_connections_total",
2684 0 : "Number of started walreceiver connections"
2685 0 : )
2686 0 : .expect("failed to define a metric")
2687 0 : });
2688 :
2689 0 : pub(crate) static WALRECEIVER_ACTIVE_MANAGERS: Lazy<IntGauge> = Lazy::new(|| {
2690 0 : register_int_gauge!(
2691 0 : "pageserver_walreceiver_active_managers",
2692 0 : "Number of active walreceiver managers"
2693 0 : )
2694 0 : .expect("failed to define a metric")
2695 0 : });
2696 :
2697 0 : pub(crate) static WALRECEIVER_SWITCHES: Lazy<IntCounterVec> = Lazy::new(|| {
2698 0 : register_int_counter_vec!(
2699 0 : "pageserver_walreceiver_switches_total",
2700 0 : "Number of walreceiver manager change_connection calls",
2701 0 : &["reason"]
2702 0 : )
2703 0 : .expect("failed to define a metric")
2704 0 : });
2705 :
2706 0 : pub(crate) static WALRECEIVER_BROKER_UPDATES: Lazy<IntCounter> = Lazy::new(|| {
2707 0 : register_int_counter!(
2708 0 : "pageserver_walreceiver_broker_updates_total",
2709 0 : "Number of received broker updates in walreceiver"
2710 0 : )
2711 0 : .expect("failed to define a metric")
2712 0 : });
2713 :
2714 12 : pub(crate) static WALRECEIVER_CANDIDATES_EVENTS: Lazy<IntCounterVec> = Lazy::new(|| {
2715 12 : register_int_counter_vec!(
2716 12 : "pageserver_walreceiver_candidates_events_total",
2717 12 : "Number of walreceiver candidate events",
2718 12 : &["event"]
2719 12 : )
2720 12 : .expect("failed to define a metric")
2721 12 : });
2722 :
2723 : pub(crate) static WALRECEIVER_CANDIDATES_ADDED: Lazy<IntCounter> =
2724 0 : Lazy::new(|| WALRECEIVER_CANDIDATES_EVENTS.with_label_values(&["add"]));
2725 :
2726 : pub(crate) static WALRECEIVER_CANDIDATES_REMOVED: Lazy<IntCounter> =
2727 12 : Lazy::new(|| WALRECEIVER_CANDIDATES_EVENTS.with_label_values(&["remove"]));
2728 :
2729 : // Metrics collected on WAL redo operations
2730 : //
2731 : // We collect the time spent in actual WAL redo ('redo'), and time waiting
2732 : // for access to the postgres process ('wait') since there is only one for
2733 : // each tenant.
2734 :
2735 : /// Time buckets are small because we want to be able to measure the
2736 : /// smallest redo processing times. These buckets allow us to measure down
2737 : /// to 5us, which equates to 200'000 pages/sec, which equates to 1.6GB/sec.
2738 : /// This is much better than the previous 5ms aka 200 pages/sec aka 1.6MB/sec.
2739 : ///
2740 : /// Values up to 1s are recorded because metrics show that we have redo
2741 : /// durations and lock times larger than 0.250s.
2742 : macro_rules! redo_histogram_time_buckets {
2743 : () => {
2744 : vec![
2745 : 0.000_005, 0.000_010, 0.000_025, 0.000_050, 0.000_100, 0.000_250, 0.000_500, 0.001_000,
2746 : 0.002_500, 0.005_000, 0.010_000, 0.025_000, 0.050_000, 0.100_000, 0.250_000, 0.500_000,
2747 : 1.000_000,
2748 : ]
2749 : };
2750 : }
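// The arithmetic behind the comment above, assuming the standard 8 KiB
// Postgres page size:
//     1 page / 5 us = 200_000 pages/sec; 200_000 * 8 KiB ~= 1.6 GB/sec
//     1 page / 5 ms =     200 pages/sec;     200 * 8 KiB ~= 1.6 MB/sec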
2751 :
2752 : /// While we're at it, also measure the number of records replayed in each
2753 : /// operation. We have a global 'total replayed' counter, but that's not
2754 : /// as useful as 'what is the skew for how many records we replay in one
2755 : /// operation'.
2756 : macro_rules! redo_histogram_count_buckets {
2757 : () => {
2758 : vec![0.0, 1.0, 2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0]
2759 : };
2760 : }
2761 :
2762 : macro_rules! redo_bytes_histogram_count_buckets {
2763 : () => {
2764 : // powers of (2^.5), from 2^4.5 to 2^15 (22 buckets)
2765 : // rounded up to the next multiple of 8 to capture any MAXALIGNed record of that size, too.
2766 : vec![
2767 : 24.0, 32.0, 48.0, 64.0, 96.0, 128.0, 184.0, 256.0, 368.0, 512.0, 728.0, 1024.0, 1456.0,
2768 : 2048.0, 2904.0, 4096.0, 5800.0, 8192.0, 11592.0, 16384.0, 23176.0, 32768.0,
2769 : ]
2770 : };
2771 : }
2772 :
2773 : pub(crate) struct WalIngestMetrics {
2774 : pub(crate) bytes_received: IntCounter,
2775 : pub(crate) records_received: IntCounter,
2776 : pub(crate) records_observed: IntCounter,
2777 : pub(crate) records_committed: IntCounter,
2778 : pub(crate) records_filtered: IntCounter,
2779 : pub(crate) values_committed_metadata_images: IntCounter,
2780 : pub(crate) values_committed_metadata_deltas: IntCounter,
2781 : pub(crate) values_committed_data_images: IntCounter,
2782 : pub(crate) values_committed_data_deltas: IntCounter,
2783 : pub(crate) gap_blocks_zeroed_on_rel_extend: IntCounter,
2784 : }
2785 :
2786 : impl WalIngestMetrics {
2787 0 : pub(crate) fn inc_values_committed(&self, stats: &DatadirModificationStats) {
2788 0 : if stats.metadata_images > 0 {
2789 0 : self.values_committed_metadata_images
2790 0 : .inc_by(stats.metadata_images);
2791 0 : }
2792 0 : if stats.metadata_deltas > 0 {
2793 0 : self.values_committed_metadata_deltas
2794 0 : .inc_by(stats.metadata_deltas);
2795 0 : }
2796 0 : if stats.data_images > 0 {
2797 0 : self.values_committed_data_images.inc_by(stats.data_images);
2798 0 : }
2799 0 : if stats.data_deltas > 0 {
2800 0 : self.values_committed_data_deltas.inc_by(stats.data_deltas);
2801 0 : }
2802 0 : }
2803 : }
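// For example (values are illustrative, and assuming these four counts are the
// struct's fields): a modification that committed two data images and ten data
// deltas bumps only the two data counters.
//
//     WAL_INGEST.inc_values_committed(&DatadirModificationStats {
//         metadata_images: 0,
//         metadata_deltas: 0,
//         data_images: 2,
//         data_deltas: 10,
//     });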
2804 :
2805 60 : pub(crate) static WAL_INGEST: Lazy<WalIngestMetrics> = Lazy::new(|| {
2806 60 : let values_committed = register_int_counter_vec!(
2807 60 : "pageserver_wal_ingest_values_committed",
2808 60 : "Number of values committed to pageserver storage from WAL records",
2809 60 : &["class", "kind"],
2810 60 : )
2811 60 : .expect("failed to define a metric");
2812 60 :
2813 60 : WalIngestMetrics {
2814 60 : bytes_received: register_int_counter!(
2815 60 : "pageserver_wal_ingest_bytes_received",
2816 60 : "Bytes of WAL ingested from safekeepers",
2817 60 : )
2818 60 : .unwrap(),
2819 60 : records_received: register_int_counter!(
2820 60 : "pageserver_wal_ingest_records_received",
2821 60 : "Number of WAL records received from safekeepers"
2822 60 : )
2823 60 : .expect("failed to define a metric"),
2824 60 : records_observed: register_int_counter!(
2825 60 : "pageserver_wal_ingest_records_observed",
2826 60 : "Number of WAL records observed from safekeepers. These are metadata only records for shard 0."
2827 60 : )
2828 60 : .expect("failed to define a metric"),
2829 60 : records_committed: register_int_counter!(
2830 60 : "pageserver_wal_ingest_records_committed",
2831 60 : "Number of WAL records which resulted in writes to pageserver storage"
2832 60 : )
2833 60 : .expect("failed to define a metric"),
2834 60 : records_filtered: register_int_counter!(
2835 60 : "pageserver_wal_ingest_records_filtered",
2836 60 : "Number of WAL records filtered out due to sharding"
2837 60 : )
2838 60 : .expect("failed to define a metric"),
2839 60 : values_committed_metadata_images: values_committed.with_label_values(&["metadata", "image"]),
2840 60 : values_committed_metadata_deltas: values_committed.with_label_values(&["metadata", "delta"]),
2841 60 : values_committed_data_images: values_committed.with_label_values(&["data", "image"]),
2842 60 : values_committed_data_deltas: values_committed.with_label_values(&["data", "delta"]),
2843 60 : gap_blocks_zeroed_on_rel_extend: register_int_counter!(
2844 60 : "pageserver_gap_blocks_zeroed_on_rel_extend",
2845 60 : "Total number of zero gap blocks written on relation extends"
2846 60 : )
2847 60 : .expect("failed to define a metric"),
2848 60 : }
2849 60 : });
2850 :
2851 1284 : pub(crate) static PAGESERVER_TIMELINE_WAL_RECORDS_RECEIVED: Lazy<IntCounterVec> = Lazy::new(|| {
2852 1284 : register_int_counter_vec!(
2853 1284 : "pageserver_timeline_wal_records_received",
2854 1284 : "Number of WAL records received per shard",
2855 1284 : &["tenant_id", "shard_id", "timeline_id"]
2856 1284 : )
2857 1284 : .expect("failed to define a metric")
2858 1284 : });
2859 :
2860 36 : pub(crate) static WAL_REDO_TIME: Lazy<Histogram> = Lazy::new(|| {
2861 36 : register_histogram!(
2862 36 : "pageserver_wal_redo_seconds",
2863 36 : "Time spent on WAL redo",
2864 36 : redo_histogram_time_buckets!()
2865 36 : )
2866 36 : .expect("failed to define a metric")
2867 36 : });
2868 :
2869 36 : pub(crate) static WAL_REDO_RECORDS_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
2870 36 : register_histogram!(
2871 36 : "pageserver_wal_redo_records_histogram",
2872 36 : "Histogram of number of records replayed per redo in the Postgres WAL redo process",
2873 36 : redo_histogram_count_buckets!(),
2874 36 : )
2875 36 : .expect("failed to define a metric")
2876 36 : });
2877 :
2878 36 : pub(crate) static WAL_REDO_BYTES_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
2879 36 : register_histogram!(
2880 36 : "pageserver_wal_redo_bytes_histogram",
2881 36 :         "Histogram of number of bytes replayed per redo sent to Postgres",
2882 36 : redo_bytes_histogram_count_buckets!(),
2883 36 : )
2884 36 : .expect("failed to define a metric")
2885 36 : });
2886 :
2887 : // FIXME: isn't this already included by WAL_REDO_RECORDS_HISTOGRAM which has _count?
2888 36 : pub(crate) static WAL_REDO_RECORD_COUNTER: Lazy<IntCounter> = Lazy::new(|| {
2889 36 : register_int_counter!(
2890 36 : "pageserver_replayed_wal_records_total",
2891 36 : "Number of WAL records replayed in WAL redo process"
2892 36 : )
2893 36 : .unwrap()
2894 36 : });
2895 :
2896 : #[rustfmt::skip]
2897 48 : pub(crate) static WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
2898 48 : register_histogram!(
2899 48 : "pageserver_wal_redo_process_launch_duration",
2900 48 : "Histogram of the duration of successful WalRedoProcess::launch calls",
2901 48 : vec![
2902 48 : 0.0002, 0.0004, 0.0006, 0.0008, 0.0010,
2903 48 : 0.0020, 0.0040, 0.0060, 0.0080, 0.0100,
2904 48 : 0.0200, 0.0400, 0.0600, 0.0800, 0.1000,
2905 48 : 0.2000, 0.4000, 0.6000, 0.8000, 1.0000,
2906 48 : 1.5000, 2.0000, 2.5000, 3.0000, 4.0000, 10.0000
2907 48 : ],
2908 48 : )
2909 48 : .expect("failed to define a metric")
2910 48 : });
2911 :
2912 : pub(crate) struct WalRedoProcessCounters {
2913 : pub(crate) started: IntCounter,
2914 : pub(crate) killed_by_cause: EnumMap<WalRedoKillCause, IntCounter>,
2915 : pub(crate) active_stderr_logger_tasks_started: IntCounter,
2916 : pub(crate) active_stderr_logger_tasks_finished: IntCounter,
2917 : }
2918 :
2919 : #[derive(Debug, enum_map::Enum, strum_macros::IntoStaticStr)]
2920 : pub(crate) enum WalRedoKillCause {
2921 : WalRedoProcessDrop,
2922 : NoLeakChildDrop,
2923 : Startup,
2924 : }
2925 :
2926 : impl Default for WalRedoProcessCounters {
2927 48 : fn default() -> Self {
2928 48 : let started = register_int_counter!(
2929 48 : "pageserver_wal_redo_process_started_total",
2930 48 : "Number of WAL redo processes started",
2931 48 : )
2932 48 : .unwrap();
2933 48 :
2934 48 : let killed = register_int_counter_vec!(
2935 48 : "pageserver_wal_redo_process_stopped_total",
2936 48 : "Number of WAL redo processes stopped",
2937 48 : &["cause"],
2938 48 : )
2939 48 : .unwrap();
2940 48 :
2941 48 : let active_stderr_logger_tasks_started = register_int_counter!(
2942 48 : "pageserver_walredo_stderr_logger_tasks_started_total",
2943 48 : "Number of active walredo stderr logger tasks that have started",
2944 48 : )
2945 48 : .unwrap();
2946 48 :
2947 48 : let active_stderr_logger_tasks_finished = register_int_counter!(
2948 48 : "pageserver_walredo_stderr_logger_tasks_finished_total",
2949 48 : "Number of active walredo stderr logger tasks that have finished",
2950 48 : )
2951 48 : .unwrap();
2952 48 :
2953 48 : Self {
2954 48 : started,
2955 144 : killed_by_cause: EnumMap::from_array(std::array::from_fn(|i| {
2956 144 : let cause = WalRedoKillCause::from_usize(i);
2957 144 : let cause_str: &'static str = cause.into();
2958 144 : killed.with_label_values(&[cause_str])
2959 144 : })),
2960 48 : active_stderr_logger_tasks_started,
2961 48 : active_stderr_logger_tasks_finished,
2962 48 : }
2963 48 : }
2964 : }
2965 :
2966 : pub(crate) static WAL_REDO_PROCESS_COUNTERS: Lazy<WalRedoProcessCounters> =
2967 : Lazy::new(WalRedoProcessCounters::default);
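// Illustrative sketch, not part of the original source: `killed_by_cause` is an EnumMap,
// so a kill cause is recorded by indexing with the enum directly -- no string labels needed
// at the call site.
#[allow(dead_code)]
fn _sketch_walredo_process_counters() {
    WAL_REDO_PROCESS_COUNTERS.started.inc();
    WAL_REDO_PROCESS_COUNTERS.killed_by_cause[WalRedoKillCause::Startup].inc();
}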
2968 :
2969 : /// Similar to `prometheus::HistogramTimer` but does not record on drop.
2970 : pub(crate) struct StorageTimeMetricsTimer {
2971 : metrics: StorageTimeMetrics,
2972 : start: Instant,
2973 : }
2974 :
2975 : impl StorageTimeMetricsTimer {
2976 13056 : fn new(metrics: StorageTimeMetrics) -> Self {
2977 13056 : Self {
2978 13056 : metrics,
2979 13056 : start: Instant::now(),
2980 13056 : }
2981 13056 : }
2982 :
2983 : /// Returns the elapsed duration of the timer.
2984 13056 : pub fn elapsed(&self) -> Duration {
2985 13056 : self.start.elapsed()
2986 13056 : }
2987 :
2988 : /// Record the time from creation to now and return it.
2989 13056 : pub fn stop_and_record(self) -> Duration {
2990 13056 : let duration = self.elapsed();
2991 13056 : let seconds = duration.as_secs_f64();
2992 13056 : self.metrics.timeline_sum.inc_by(seconds);
2993 13056 : self.metrics.timeline_count.inc();
2994 13056 : self.metrics.global_histogram.observe(seconds);
2995 13056 : duration
2996 13056 : }
2997 :
2998 : /// Converts this timer into one that always records on drop -- usually this means
2999 : /// recording regardless of whether an early `?` return path was taken in a function.
3000 120 : pub(crate) fn record_on_drop(self) -> AlwaysRecordingStorageTimeMetricsTimer {
3001 120 : AlwaysRecordingStorageTimeMetricsTimer(Some(self))
3002 120 : }
3003 : }
3004 :
3005 : pub(crate) struct AlwaysRecordingStorageTimeMetricsTimer(Option<StorageTimeMetricsTimer>);
3006 :
3007 : impl Drop for AlwaysRecordingStorageTimeMetricsTimer {
3008 120 : fn drop(&mut self) {
3009 120 : if let Some(inner) = self.0.take() {
3010 120 : inner.stop_and_record();
3011 120 : }
3012 120 : }
3013 : }
3014 :
3015 : impl AlwaysRecordingStorageTimeMetricsTimer {
3016 : /// Returns the elapsed duration of the timer.
3017 0 : pub fn elapsed(&self) -> Duration {
3018 0 : self.0.as_ref().expect("not dropped yet").elapsed()
3019 0 : }
3020 : }
3021 :
3022 : /// Timing facilities for a globally histogrammed metric, supplemented by a per-tenant
3023 : /// and per-timeline total sum and count.
3024 : #[derive(Clone, Debug)]
3025 : pub(crate) struct StorageTimeMetrics {
3026 : /// Sum of f64 seconds, per operation, tenant_id and timeline_id
3027 : timeline_sum: Counter,
3028 : /// Number of operations, per operation, tenant_id and timeline_id
3029 : timeline_count: IntCounter,
3030 : /// Global histogram having only the "operation" label.
3031 : global_histogram: Histogram,
3032 : }
3033 :
3034 : impl StorageTimeMetrics {
3035 25164 : pub fn new(
3036 25164 : operation: StorageTimeOperation,
3037 25164 : tenant_id: &str,
3038 25164 : shard_id: &str,
3039 25164 : timeline_id: &str,
3040 25164 : ) -> Self {
3041 25164 : let operation: &'static str = operation.into();
3042 25164 :
3043 25164 : let timeline_sum = STORAGE_TIME_SUM_PER_TIMELINE
3044 25164 : .get_metric_with_label_values(&[operation, tenant_id, shard_id, timeline_id])
3045 25164 : .unwrap();
3046 25164 : let timeline_count = STORAGE_TIME_COUNT_PER_TIMELINE
3047 25164 : .get_metric_with_label_values(&[operation, tenant_id, shard_id, timeline_id])
3048 25164 : .unwrap();
3049 25164 : let global_histogram = STORAGE_TIME_GLOBAL
3050 25164 : .get_metric_with_label_values(&[operation])
3051 25164 : .unwrap();
3052 25164 :
3053 25164 : StorageTimeMetrics {
3054 25164 : timeline_sum,
3055 25164 : timeline_count,
3056 25164 : global_histogram,
3057 25164 : }
3058 25164 : }
3059 :
3060 : /// Starts timing a new operation.
3061 : ///
3062 : /// Note: unlike `prometheus::HistogramTimer`, the returned timer does not record on drop.
3063 13056 : pub fn start_timer(&self) -> StorageTimeMetricsTimer {
3064 13056 : StorageTimeMetricsTimer::new(self.clone())
3065 13056 : }
3066 : }
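// Illustrative usage sketch, not part of the original source; the call site is assumed.
#[allow(dead_code)]
fn _sketch_storage_time_metrics(metrics: &StorageTimeMetrics) {
    let timer = metrics.start_timer();
    // ... perform the storage operation; an early `?` return here would skip recording ...
    let _elapsed = timer.stop_and_record();

    // Alternatively, convert the timer up front so it records even on early return:
    let _guard = metrics.start_timer().record_on_drop();
}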
3067 :
3068 : pub(crate) struct TimelineMetrics {
3069 : tenant_id: String,
3070 : shard_id: String,
3071 : timeline_id: String,
3072 : pub flush_time_histo: StorageTimeMetrics,
3073 : pub flush_delay_histo: StorageTimeMetrics,
3074 : pub compact_time_histo: StorageTimeMetrics,
3075 : pub create_images_time_histo: StorageTimeMetrics,
3076 : pub logical_size_histo: StorageTimeMetrics,
3077 : pub imitate_logical_size_histo: StorageTimeMetrics,
3078 : pub load_layer_map_histo: StorageTimeMetrics,
3079 : pub garbage_collect_histo: StorageTimeMetrics,
3080 : pub find_gc_cutoffs_histo: StorageTimeMetrics,
3081 : pub last_record_lsn_gauge: IntGauge,
3082 : pub disk_consistent_lsn_gauge: IntGauge,
3083 : pub pitr_history_size: UIntGauge,
3084 : pub archival_size: UIntGauge,
3085 : pub layers_per_read: Histogram,
3086 : pub standby_horizon_gauge: IntGauge,
3087 : pub resident_physical_size_gauge: UIntGauge,
3088 : pub visible_physical_size_gauge: UIntGauge,
3089 : /// copy of LayeredTimeline.current_logical_size
3090 : pub current_logical_size_gauge: UIntGauge,
3091 : pub aux_file_size_gauge: IntGauge,
3092 : pub directory_entries_count_gauge: Lazy<UIntGauge, Box<dyn Send + Fn() -> UIntGauge>>,
3093 : pub evictions: IntCounter,
3094 : pub evictions_with_low_residence_duration: std::sync::RwLock<EvictionsWithLowResidenceDuration>,
3095 : /// Number of valid LSN leases.
3096 : pub valid_lsn_lease_count_gauge: UIntGauge,
3097 : pub wal_records_received: IntCounter,
3098 : pub storage_io_size: StorageIoSizeMetrics,
3099 : pub wait_lsn_in_progress_micros: GlobalAndPerTenantIntCounter,
3100 : pub wait_lsn_start_finish_counterpair: IntCounterPair,
3101 : pub wait_ondemand_download_time: wait_ondemand_download_time::WaitOndemandDownloadTimeSum,
3102 : shutdown: std::sync::atomic::AtomicBool,
3103 : }
3104 :
3105 : impl TimelineMetrics {
3106 2796 : pub fn new(
3107 2796 : tenant_shard_id: &TenantShardId,
3108 2796 : timeline_id_raw: &TimelineId,
3109 2796 : evictions_with_low_residence_duration_builder: EvictionsWithLowResidenceDurationBuilder,
3110 2796 : ) -> Self {
3111 2796 : let tenant_id = tenant_shard_id.tenant_id.to_string();
3112 2796 : let shard_id = format!("{}", tenant_shard_id.shard_slug());
3113 2796 : let timeline_id = timeline_id_raw.to_string();
3114 2796 : let flush_time_histo = StorageTimeMetrics::new(
3115 2796 : StorageTimeOperation::LayerFlush,
3116 2796 : &tenant_id,
3117 2796 : &shard_id,
3118 2796 : &timeline_id,
3119 2796 : );
3120 2796 : let flush_delay_histo = StorageTimeMetrics::new(
3121 2796 : StorageTimeOperation::LayerFlushDelay,
3122 2796 : &tenant_id,
3123 2796 : &shard_id,
3124 2796 : &timeline_id,
3125 2796 : );
3126 2796 : let compact_time_histo = StorageTimeMetrics::new(
3127 2796 : StorageTimeOperation::Compact,
3128 2796 : &tenant_id,
3129 2796 : &shard_id,
3130 2796 : &timeline_id,
3131 2796 : );
3132 2796 : let create_images_time_histo = StorageTimeMetrics::new(
3133 2796 : StorageTimeOperation::CreateImages,
3134 2796 : &tenant_id,
3135 2796 : &shard_id,
3136 2796 : &timeline_id,
3137 2796 : );
3138 2796 : let logical_size_histo = StorageTimeMetrics::new(
3139 2796 : StorageTimeOperation::LogicalSize,
3140 2796 : &tenant_id,
3141 2796 : &shard_id,
3142 2796 : &timeline_id,
3143 2796 : );
3144 2796 : let imitate_logical_size_histo = StorageTimeMetrics::new(
3145 2796 : StorageTimeOperation::ImitateLogicalSize,
3146 2796 : &tenant_id,
3147 2796 : &shard_id,
3148 2796 : &timeline_id,
3149 2796 : );
3150 2796 : let load_layer_map_histo = StorageTimeMetrics::new(
3151 2796 : StorageTimeOperation::LoadLayerMap,
3152 2796 : &tenant_id,
3153 2796 : &shard_id,
3154 2796 : &timeline_id,
3155 2796 : );
3156 2796 : let garbage_collect_histo = StorageTimeMetrics::new(
3157 2796 : StorageTimeOperation::Gc,
3158 2796 : &tenant_id,
3159 2796 : &shard_id,
3160 2796 : &timeline_id,
3161 2796 : );
3162 2796 : let find_gc_cutoffs_histo = StorageTimeMetrics::new(
3163 2796 : StorageTimeOperation::FindGcCutoffs,
3164 2796 : &tenant_id,
3165 2796 : &shard_id,
3166 2796 : &timeline_id,
3167 2796 : );
3168 2796 : let last_record_lsn_gauge = LAST_RECORD_LSN
3169 2796 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3170 2796 : .unwrap();
3171 2796 :
3172 2796 : let disk_consistent_lsn_gauge = DISK_CONSISTENT_LSN
3173 2796 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3174 2796 : .unwrap();
3175 2796 :
3176 2796 : let pitr_history_size = PITR_HISTORY_SIZE
3177 2796 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3178 2796 : .unwrap();
3179 2796 :
3180 2796 : let archival_size = TIMELINE_ARCHIVE_SIZE
3181 2796 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3182 2796 : .unwrap();
3183 2796 :
3184 2796 : let layers_per_read = LAYERS_PER_READ
3185 2796 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3186 2796 : .unwrap();
3187 2796 :
3188 2796 : let standby_horizon_gauge = STANDBY_HORIZON
3189 2796 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3190 2796 : .unwrap();
3191 2796 : let resident_physical_size_gauge = RESIDENT_PHYSICAL_SIZE
3192 2796 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3193 2796 : .unwrap();
3194 2796 : let visible_physical_size_gauge = VISIBLE_PHYSICAL_SIZE
3195 2796 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3196 2796 : .unwrap();
3197 2796 : // TODO: we shouldn't expose this metric
3198 2796 : let current_logical_size_gauge = CURRENT_LOGICAL_SIZE
3199 2796 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3200 2796 : .unwrap();
3201 2796 : let aux_file_size_gauge = AUX_FILE_SIZE
3202 2796 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3203 2796 : .unwrap();
3204 2796 : // TODO use impl Trait syntax here once we have the ability to use it: https://github.com/rust-lang/rust/issues/63065
3205 2796 : let directory_entries_count_gauge_closure = {
3206 2796 : let tenant_shard_id = *tenant_shard_id;
3207 2796 : let timeline_id_raw = *timeline_id_raw;
3208 0 : move || {
3209 0 : let tenant_id = tenant_shard_id.tenant_id.to_string();
3210 0 : let shard_id = format!("{}", tenant_shard_id.shard_slug());
3211 0 : let timeline_id = timeline_id_raw.to_string();
3212 0 : let gauge: UIntGauge = DIRECTORY_ENTRIES_COUNT
3213 0 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3214 0 : .unwrap();
3215 0 : gauge
3216 0 : }
3217 : };
3218 2796 : let directory_entries_count_gauge: Lazy<UIntGauge, Box<dyn Send + Fn() -> UIntGauge>> =
3219 2796 : Lazy::new(Box::new(directory_entries_count_gauge_closure));
3220 2796 : let evictions = EVICTIONS
3221 2796 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3222 2796 : .unwrap();
3223 2796 : let evictions_with_low_residence_duration = evictions_with_low_residence_duration_builder
3224 2796 : .build(&tenant_id, &shard_id, &timeline_id);
3225 2796 :
3226 2796 : let valid_lsn_lease_count_gauge = VALID_LSN_LEASE_COUNT
3227 2796 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3228 2796 : .unwrap();
3229 2796 :
3230 2796 : let wal_records_received = PAGESERVER_TIMELINE_WAL_RECORDS_RECEIVED
3231 2796 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3232 2796 : .unwrap();
3233 2796 :
3234 2796 : let storage_io_size = StorageIoSizeMetrics::new(&tenant_id, &shard_id, &timeline_id);
3235 2796 :
3236 2796 : let wait_lsn_in_progress_micros = GlobalAndPerTenantIntCounter {
3237 2796 : global: WAIT_LSN_IN_PROGRESS_GLOBAL_MICROS.clone(),
3238 2796 : per_tenant: WAIT_LSN_IN_PROGRESS_MICROS
3239 2796 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3240 2796 : .unwrap(),
3241 2796 : };
3242 2796 :
3243 2796 : let wait_lsn_start_finish_counterpair = WAIT_LSN_START_FINISH_COUNTERPAIR
3244 2796 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
3245 2796 : .unwrap();
3246 2796 :
3247 2796 : let wait_ondemand_download_time =
3248 2796 : wait_ondemand_download_time::WaitOndemandDownloadTimeSum::new(
3249 2796 : &tenant_id,
3250 2796 : &shard_id,
3251 2796 : &timeline_id,
3252 2796 : );
3253 2796 :
3254 2796 : TimelineMetrics {
3255 2796 : tenant_id,
3256 2796 : shard_id,
3257 2796 : timeline_id,
3258 2796 : flush_time_histo,
3259 2796 : flush_delay_histo,
3260 2796 : compact_time_histo,
3261 2796 : create_images_time_histo,
3262 2796 : logical_size_histo,
3263 2796 : imitate_logical_size_histo,
3264 2796 : garbage_collect_histo,
3265 2796 : find_gc_cutoffs_histo,
3266 2796 : load_layer_map_histo,
3267 2796 : last_record_lsn_gauge,
3268 2796 : disk_consistent_lsn_gauge,
3269 2796 : pitr_history_size,
3270 2796 : archival_size,
3271 2796 : layers_per_read,
3272 2796 : standby_horizon_gauge,
3273 2796 : resident_physical_size_gauge,
3274 2796 : visible_physical_size_gauge,
3275 2796 : current_logical_size_gauge,
3276 2796 : aux_file_size_gauge,
3277 2796 : directory_entries_count_gauge,
3278 2796 : evictions,
3279 2796 : evictions_with_low_residence_duration: std::sync::RwLock::new(
3280 2796 : evictions_with_low_residence_duration,
3281 2796 : ),
3282 2796 : storage_io_size,
3283 2796 : valid_lsn_lease_count_gauge,
3284 2796 : wal_records_received,
3285 2796 : wait_lsn_in_progress_micros,
3286 2796 : wait_lsn_start_finish_counterpair,
3287 2796 : wait_ondemand_download_time,
3288 2796 : shutdown: std::sync::atomic::AtomicBool::default(),
3289 2796 : }
3290 2796 : }
3291 :
3292 9516 : pub(crate) fn record_new_file_metrics(&self, sz: u64) {
3293 9516 : self.resident_physical_size_add(sz);
3294 9516 : }
3295 :
3296 3260 : pub(crate) fn resident_physical_size_sub(&self, sz: u64) {
3297 3260 : self.resident_physical_size_gauge.sub(sz);
3298 3260 : crate::metrics::RESIDENT_PHYSICAL_SIZE_GLOBAL.sub(sz);
3299 3260 : }
3300 :
3301 10332 : pub(crate) fn resident_physical_size_add(&self, sz: u64) {
3302 10332 : self.resident_physical_size_gauge.add(sz);
3303 10332 : crate::metrics::RESIDENT_PHYSICAL_SIZE_GLOBAL.add(sz);
3304 10332 : }
3305 :
3306 60 : pub(crate) fn resident_physical_size_get(&self) -> u64 {
3307 60 : self.resident_physical_size_gauge.get()
3308 60 : }
3309 :
3310 : /// Generates TIMELINE_LAYER labels for a persistent layer.
3311 15877 : fn make_layer_labels(&self, layer_desc: &PersistentLayerDesc) -> [&str; 5] {
3312 15877 : let level = match LayerMap::is_l0(&layer_desc.key_range, layer_desc.is_delta()) {
3313 8540 : true => LayerLevel::L0,
3314 7337 : false => LayerLevel::L1,
3315 : };
3316 15877 : let kind = match layer_desc.is_delta() {
3317 13130 : true => LayerKind::Delta,
3318 2747 : false => LayerKind::Image,
3319 : };
3320 15877 : [
3321 15877 : &self.tenant_id,
3322 15877 : &self.shard_id,
3323 15877 : &self.timeline_id,
3324 15877 : level.into(),
3325 15877 : kind.into(),
3326 15877 : ]
3327 15877 : }
3328 :
3329 : /// Generates TIMELINE_LAYER labels for a frozen ephemeral layer.
3330 14232 : fn make_frozen_layer_labels(&self, _layer: &InMemoryLayer) -> [&str; 5] {
3331 14232 : [
3332 14232 : &self.tenant_id,
3333 14232 : &self.shard_id,
3334 14232 : &self.timeline_id,
3335 14232 : LayerLevel::Frozen.into(),
3336 14232 : LayerKind::Delta.into(), // by definition
3337 14232 : ]
3338 14232 : }
3339 :
3340 : /// Removes a frozen ephemeral layer from TIMELINE_LAYER metrics.
3341 7116 : pub fn dec_frozen_layer(&self, layer: &InMemoryLayer) {
3342 7116 : assert!(matches!(layer.info(), InMemoryLayerInfo::Frozen { .. }));
3343 7116 : let labels = self.make_frozen_layer_labels(layer);
3344 7116 : let size = layer.try_len().expect("frozen layer should have no writer");
3345 7116 : TIMELINE_LAYER_COUNT
3346 7116 : .get_metric_with_label_values(&labels)
3347 7116 : .unwrap()
3348 7116 : .dec();
3349 7116 : TIMELINE_LAYER_SIZE
3350 7116 : .get_metric_with_label_values(&labels)
3351 7116 : .unwrap()
3352 7116 : .sub(size);
3353 7116 : }
3354 :
3355 : /// Adds a frozen ephemeral layer to TIMELINE_LAYER metrics.
3356 7116 : pub fn inc_frozen_layer(&self, layer: &InMemoryLayer) {
3357 7116 : assert!(matches!(layer.info(), InMemoryLayerInfo::Frozen { .. }));
3358 7116 : let labels = self.make_frozen_layer_labels(layer);
3359 7116 : let size = layer.try_len().expect("frozen layer should have no writer");
3360 7116 : TIMELINE_LAYER_COUNT
3361 7116 : .get_metric_with_label_values(&labels)
3362 7116 : .unwrap()
3363 7116 : .inc();
3364 7116 : TIMELINE_LAYER_SIZE
3365 7116 : .get_metric_with_label_values(&labels)
3366 7116 : .unwrap()
3367 7116 : .add(size);
3368 7116 : }
3369 :
3370 : /// Removes a persistent layer from TIMELINE_LAYER metrics.
3371 4153 : pub fn dec_layer(&self, layer_desc: &PersistentLayerDesc) {
3372 4153 : let labels = self.make_layer_labels(layer_desc);
3373 4153 : TIMELINE_LAYER_COUNT
3374 4153 : .get_metric_with_label_values(&labels)
3375 4153 : .unwrap()
3376 4153 : .dec();
3377 4153 : TIMELINE_LAYER_SIZE
3378 4153 : .get_metric_with_label_values(&labels)
3379 4153 : .unwrap()
3380 4153 : .sub(layer_desc.file_size);
3381 4153 : }
3382 :
3383 : /// Adds a persistent layer to TIMELINE_LAYER metrics.
3384 11724 : pub fn inc_layer(&self, layer_desc: &PersistentLayerDesc) {
3385 11724 : let labels = self.make_layer_labels(layer_desc);
3386 11724 : TIMELINE_LAYER_COUNT
3387 11724 : .get_metric_with_label_values(&labels)
3388 11724 : .unwrap()
3389 11724 : .inc();
3390 11724 : TIMELINE_LAYER_SIZE
3391 11724 : .get_metric_with_label_values(&labels)
3392 11724 : .unwrap()
3393 11724 : .add(layer_desc.file_size);
3394 11724 : }
3395 :
3396 60 : pub(crate) fn shutdown(&self) {
3397 60 : let was_shutdown = self
3398 60 : .shutdown
3399 60 : .swap(true, std::sync::atomic::Ordering::Relaxed);
3400 60 :
3401 60 : if was_shutdown {
3402 : // This happens on tenant deletion because the tenant first shuts down its timelines,
3403 : // then invokes timeline deletion, which first shuts down the timeline again.
3404 : // TODO: this can be removed once https://github.com/neondatabase/neon/issues/5080 is fixed.
3405 0 : return;
3406 60 : }
3407 60 :
3408 60 : let tenant_id = &self.tenant_id;
3409 60 : let timeline_id = &self.timeline_id;
3410 60 : let shard_id = &self.shard_id;
3411 60 : let _ = LAST_RECORD_LSN.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3412 60 : let _ = DISK_CONSISTENT_LSN.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3413 60 : let _ = STANDBY_HORIZON.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3414 60 : {
3415 60 : RESIDENT_PHYSICAL_SIZE_GLOBAL.sub(self.resident_physical_size_get());
3416 60 : let _ = RESIDENT_PHYSICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3417 60 : }
3418 60 : let _ = VISIBLE_PHYSICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3419 60 : let _ = CURRENT_LOGICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3420 60 : if let Some(metric) = Lazy::get(&DIRECTORY_ENTRIES_COUNT) {
3421 0 : let _ = metric.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3422 60 : }
3423 :
3424 60 : let _ = TIMELINE_ARCHIVE_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3425 60 : let _ = PITR_HISTORY_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3426 :
3427 240 : for ref level in LayerLevel::iter() {
3428 540 : for ref kind in LayerKind::iter() {
3429 360 : let labels: [&str; 5] =
3430 360 : [tenant_id, shard_id, timeline_id, level.into(), kind.into()];
3431 360 : let _ = TIMELINE_LAYER_SIZE.remove_label_values(&labels);
3432 360 : let _ = TIMELINE_LAYER_COUNT.remove_label_values(&labels);
3433 360 : }
3434 : }
3435 :
3436 60 : let _ = LAYERS_PER_READ.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3437 60 :
3438 60 : let _ = EVICTIONS.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3439 60 : let _ = AUX_FILE_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3440 60 : let _ = VALID_LSN_LEASE_COUNT.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3441 60 :
3442 60 : self.evictions_with_low_residence_duration
3443 60 : .write()
3444 60 : .unwrap()
3445 60 : .remove(tenant_id, shard_id, timeline_id);
3446 :
3447 : // The following metrics are created outside of the TimelineMetrics lifecycle but are
3448 : // still removed at the end of it. The idea is for the metrics to outlive the
3449 : // entity they observe, e.g., the smgr metrics outlive an individual
3450 : // smgr connection, but not the timeline.
3451 :
3452 600 : for op in StorageTimeOperation::VARIANTS {
3453 540 : let _ = STORAGE_TIME_SUM_PER_TIMELINE.remove_label_values(&[
3454 540 : op,
3455 540 : tenant_id,
3456 540 : shard_id,
3457 540 : timeline_id,
3458 540 : ]);
3459 540 : let _ = STORAGE_TIME_COUNT_PER_TIMELINE.remove_label_values(&[
3460 540 : op,
3461 540 : tenant_id,
3462 540 : shard_id,
3463 540 : timeline_id,
3464 540 : ]);
3465 540 : }
3466 :
3467 180 : for op in StorageIoSizeOperation::VARIANTS {
3468 120 : let _ = STORAGE_IO_SIZE.remove_label_values(&[op, tenant_id, shard_id, timeline_id]);
3469 120 : }
3470 :
3471 : let _ =
3472 60 : WAIT_LSN_IN_PROGRESS_MICROS.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3473 60 :
3474 60 : {
3475 60 : let mut res = [Ok(()), Ok(())];
3476 60 : WAIT_LSN_START_FINISH_COUNTERPAIR
3477 60 : .remove_label_values(&mut res, &[tenant_id, shard_id, timeline_id]);
3478 60 : }
3479 60 :
3480 60 : wait_ondemand_download_time::shutdown_timeline(tenant_id, shard_id, timeline_id);
3481 60 :
3482 60 : let _ = SMGR_QUERY_STARTED_PER_TENANT_TIMELINE.remove_label_values(&[
3483 60 : SmgrQueryType::GetPageAtLsn.into(),
3484 60 : tenant_id,
3485 60 : shard_id,
3486 60 : timeline_id,
3487 60 : ]);
3488 60 : let _ = SMGR_QUERY_TIME_PER_TENANT_TIMELINE.remove_label_values(&[
3489 60 : SmgrQueryType::GetPageAtLsn.into(),
3490 60 : tenant_id,
3491 60 : shard_id,
3492 60 : timeline_id,
3493 60 : ]);
3494 60 : let _ = PAGE_SERVICE_BATCH_SIZE_PER_TENANT_TIMELINE.remove_label_values(&[
3495 60 : tenant_id,
3496 60 : shard_id,
3497 60 : timeline_id,
3498 60 : ]);
3499 60 : let _ = PAGESERVER_TIMELINE_WAL_RECORDS_RECEIVED.remove_label_values(&[
3500 60 : tenant_id,
3501 60 : shard_id,
3502 60 : timeline_id,
3503 60 : ]);
3504 60 : let _ = PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS.remove_label_values(&[
3505 60 : tenant_id,
3506 60 : shard_id,
3507 60 : timeline_id,
3508 60 : ]);
3509 60 : let _ = PAGE_SERVICE_SMGR_BATCH_WAIT_TIME.remove_label_values(&[
3510 60 : tenant_id,
3511 60 : shard_id,
3512 60 : timeline_id,
3513 60 : ]);
3514 :
3515 480 : for reason in GetPageBatchBreakReason::iter() {
3516 420 : let _ = PAGE_SERVICE_BATCH_BREAK_REASON_PER_TENANT_TIMELINE.remove_label_values(&[
3517 420 : tenant_id,
3518 420 : shard_id,
3519 420 : timeline_id,
3520 420 : reason.into(),
3521 420 : ]);
3522 420 : }
3523 60 : }
3524 : }
3525 :
3526 36 : pub(crate) fn remove_tenant_metrics(tenant_shard_id: &TenantShardId) {
3527 36 : // Only shard zero deals in synthetic sizes
3528 36 : if tenant_shard_id.is_shard_zero() {
3529 36 : let tid = tenant_shard_id.tenant_id.to_string();
3530 36 : let _ = TENANT_SYNTHETIC_SIZE_METRIC.remove_label_values(&[&tid]);
3531 36 : }
3532 :
3533 36 : tenant_throttling::remove_tenant_metrics(tenant_shard_id);
3534 36 :
3535 36 : // we leave the BROKEN_TENANTS_SET entry if any
3536 36 : }
3537 :
3538 : /// Maintain a per-timeline gauge in addition to the global gauge.
3539 : pub(crate) struct PerTimelineRemotePhysicalSizeGauge {
3540 : last_set: AtomicU64,
3541 : gauge: UIntGauge,
3542 : }
3543 :
3544 : impl PerTimelineRemotePhysicalSizeGauge {
3545 2856 : fn new(per_timeline_gauge: UIntGauge) -> Self {
3546 2856 : Self {
3547 2856 : last_set: AtomicU64::new(0),
3548 2856 : gauge: per_timeline_gauge,
3549 2856 : }
3550 2856 : }
3551 11725 : pub(crate) fn set(&self, sz: u64) {
3552 11725 : self.gauge.set(sz);
3553 11725 : let prev = self.last_set.swap(sz, std::sync::atomic::Ordering::Relaxed);
3554 11725 : if sz < prev {
3555 212 : REMOTE_PHYSICAL_SIZE_GLOBAL.sub(prev - sz);
3556 11513 : } else {
3557 11513 : REMOTE_PHYSICAL_SIZE_GLOBAL.add(sz - prev);
3558 11513 : };
3559 11725 : }
3560 12 : pub(crate) fn get(&self) -> u64 {
3561 12 : self.gauge.get()
3562 12 : }
3563 : }
3564 :
3565 : impl Drop for PerTimelineRemotePhysicalSizeGauge {
3566 120 : fn drop(&mut self) {
3567 120 : REMOTE_PHYSICAL_SIZE_GLOBAL.sub(self.last_set.load(std::sync::atomic::Ordering::Relaxed));
3568 120 : }
3569 : }
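// Worked example, not part of the original source: `set` tracks the delta it last
// contributed to REMOTE_PHYSICAL_SIZE_GLOBAL, so repeated sets and the final drop stay
// consistent with the global gauge.
#[allow(dead_code)]
fn _sketch_remote_physical_size_gauge(gauge: PerTimelineRemotePhysicalSizeGauge) {
    gauge.set(100); // global gauge += 100 (previous last_set was 0)
    gauge.set(40); // global gauge -= 60  (previous last_set was 100)
    drop(gauge); // global gauge -= 40  (the final last_set)
}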
3570 :
3571 : pub(crate) struct RemoteTimelineClientMetrics {
3572 : tenant_id: String,
3573 : shard_id: String,
3574 : timeline_id: String,
3575 : pub(crate) remote_physical_size_gauge: PerTimelineRemotePhysicalSizeGauge,
3576 : calls: Mutex<HashMap<(&'static str, &'static str), IntCounterPair>>,
3577 : bytes_started_counter: Mutex<HashMap<(&'static str, &'static str), IntCounter>>,
3578 : bytes_finished_counter: Mutex<HashMap<(&'static str, &'static str), IntCounter>>,
3579 : pub(crate) projected_remote_consistent_lsn_gauge: UIntGauge,
3580 : }
3581 :
3582 : impl RemoteTimelineClientMetrics {
3583 2856 : pub fn new(tenant_shard_id: &TenantShardId, timeline_id: &TimelineId) -> Self {
3584 2856 : let tenant_id_str = tenant_shard_id.tenant_id.to_string();
3585 2856 : let shard_id_str = format!("{}", tenant_shard_id.shard_slug());
3586 2856 : let timeline_id_str = timeline_id.to_string();
3587 2856 :
3588 2856 : let remote_physical_size_gauge = PerTimelineRemotePhysicalSizeGauge::new(
3589 2856 : REMOTE_PHYSICAL_SIZE
3590 2856 : .get_metric_with_label_values(&[&tenant_id_str, &shard_id_str, &timeline_id_str])
3591 2856 : .unwrap(),
3592 2856 : );
3593 2856 :
3594 2856 : let projected_remote_consistent_lsn_gauge = PROJECTED_REMOTE_CONSISTENT_LSN
3595 2856 : .get_metric_with_label_values(&[&tenant_id_str, &shard_id_str, &timeline_id_str])
3596 2856 : .unwrap();
3597 2856 :
3598 2856 : RemoteTimelineClientMetrics {
3599 2856 : tenant_id: tenant_id_str,
3600 2856 : shard_id: shard_id_str,
3601 2856 : timeline_id: timeline_id_str,
3602 2856 : calls: Mutex::new(HashMap::default()),
3603 2856 : bytes_started_counter: Mutex::new(HashMap::default()),
3604 2856 : bytes_finished_counter: Mutex::new(HashMap::default()),
3605 2856 : remote_physical_size_gauge,
3606 2856 : projected_remote_consistent_lsn_gauge,
3607 2856 : }
3608 2856 : }
3609 :
3610 18538 : pub fn remote_operation_time(
3611 18538 : &self,
3612 18538 : task_kind: Option<TaskKind>,
3613 18538 : file_kind: &RemoteOpFileKind,
3614 18538 : op_kind: &RemoteOpKind,
3615 18538 : status: &'static str,
3616 18538 : ) -> Histogram {
3617 18538 : REMOTE_TIMELINE_CLIENT_COMPLETION_LATENCY
3618 18538 : .get_metric_with_label_values(&[
3619 18538 : task_kind.as_ref().map(|tk| tk.into()).unwrap_or("unknown"),
3620 18538 : file_kind.as_str(),
3621 18538 : op_kind.as_str(),
3622 18538 : status,
3623 18538 : ])
3624 18538 : .unwrap()
3625 18538 : }
3626 :
3627 43511 : fn calls_counter_pair(
3628 43511 : &self,
3629 43511 : file_kind: &RemoteOpFileKind,
3630 43511 : op_kind: &RemoteOpKind,
3631 43511 : ) -> IntCounterPair {
3632 43511 : let mut guard = self.calls.lock().unwrap();
3633 43511 : let key = (file_kind.as_str(), op_kind.as_str());
3634 43511 : let metric = guard.entry(key).or_insert_with(move || {
3635 5118 : REMOTE_TIMELINE_CLIENT_CALLS
3636 5118 : .get_metric_with_label_values(&[
3637 5118 : &self.tenant_id,
3638 5118 : &self.shard_id,
3639 5118 : &self.timeline_id,
3640 5118 : key.0,
3641 5118 : key.1,
3642 5118 : ])
3643 5118 : .unwrap()
3644 43511 : });
3645 43511 : metric.clone()
3646 43511 : }
3647 :
3648 10596 : fn bytes_started_counter(
3649 10596 : &self,
3650 10596 : file_kind: &RemoteOpFileKind,
3651 10596 : op_kind: &RemoteOpKind,
3652 10596 : ) -> IntCounter {
3653 10596 : let mut guard = self.bytes_started_counter.lock().unwrap();
3654 10596 : let key = (file_kind.as_str(), op_kind.as_str());
3655 10596 : let metric = guard.entry(key).or_insert_with(move || {
3656 2016 : REMOTE_TIMELINE_CLIENT_BYTES_STARTED_COUNTER
3657 2016 : .get_metric_with_label_values(&[
3658 2016 : &self.tenant_id,
3659 2016 : &self.shard_id,
3660 2016 : &self.timeline_id,
3661 2016 : key.0,
3662 2016 : key.1,
3663 2016 : ])
3664 2016 : .unwrap()
3665 10596 : });
3666 10596 : metric.clone()
3667 10596 : }
3668 :
3669 20001 : fn bytes_finished_counter(
3670 20001 : &self,
3671 20001 : file_kind: &RemoteOpFileKind,
3672 20001 : op_kind: &RemoteOpKind,
3673 20001 : ) -> IntCounter {
3674 20001 : let mut guard = self.bytes_finished_counter.lock().unwrap();
3675 20001 : let key = (file_kind.as_str(), op_kind.as_str());
3676 20001 : let metric = guard.entry(key).or_insert_with(move || {
3677 2016 : REMOTE_TIMELINE_CLIENT_BYTES_FINISHED_COUNTER
3678 2016 : .get_metric_with_label_values(&[
3679 2016 : &self.tenant_id,
3680 2016 : &self.shard_id,
3681 2016 : &self.timeline_id,
3682 2016 : key.0,
3683 2016 : key.1,
3684 2016 : ])
3685 2016 : .unwrap()
3686 20001 : });
3687 20001 : metric.clone()
3688 20001 : }
3689 : }
3690 :
3691 : #[cfg(test)]
3692 : impl RemoteTimelineClientMetrics {
3693 36 : pub fn get_bytes_started_counter_value(
3694 36 : &self,
3695 36 : file_kind: &RemoteOpFileKind,
3696 36 : op_kind: &RemoteOpKind,
3697 36 : ) -> Option<u64> {
3698 36 : let guard = self.bytes_started_counter.lock().unwrap();
3699 36 : let key = (file_kind.as_str(), op_kind.as_str());
3700 36 : guard.get(&key).map(|counter| counter.get())
3701 36 : }
3702 :
3703 36 : pub fn get_bytes_finished_counter_value(
3704 36 : &self,
3705 36 : file_kind: &RemoteOpFileKind,
3706 36 : op_kind: &RemoteOpKind,
3707 36 : ) -> Option<u64> {
3708 36 : let guard = self.bytes_finished_counter.lock().unwrap();
3709 36 : let key = (file_kind.as_str(), op_kind.as_str());
3710 36 : guard.get(&key).map(|counter| counter.get())
3711 36 : }
3712 : }
3713 :
3714 : /// See [`RemoteTimelineClientMetrics::call_begin`].
3715 : #[must_use]
3716 : pub(crate) struct RemoteTimelineClientCallMetricGuard {
3717 : /// Decremented on drop.
3718 : calls_counter_pair: Option<IntCounterPair>,
3719 : /// If Some(), this references the bytes_finished metric, and we increment it by the given `u64` on drop.
3720 : bytes_finished: Option<(IntCounter, u64)>,
3721 : }
3722 :
3723 : impl RemoteTimelineClientCallMetricGuard {
3724 : /// Consume this guard object without performing the metric updates it would do on `drop()`.
3725 : /// The caller promises to do the metric updates manually.
3726 23036 : pub fn will_decrement_manually(mut self) {
3727 23036 : let RemoteTimelineClientCallMetricGuard {
3728 23036 : calls_counter_pair,
3729 23036 : bytes_finished,
3730 23036 : } = &mut self;
3731 23036 : calls_counter_pair.take();
3732 23036 : bytes_finished.take();
3733 23036 : }
3734 : }
3735 :
3736 : impl Drop for RemoteTimelineClientCallMetricGuard {
3737 23240 : fn drop(&mut self) {
3738 23240 : let RemoteTimelineClientCallMetricGuard {
3739 23240 : calls_counter_pair,
3740 23240 : bytes_finished,
3741 23240 : } = self;
3742 23240 : if let Some(guard) = calls_counter_pair.take() {
3743 204 : guard.dec();
3744 23036 : }
3745 23240 : if let Some((bytes_finished_metric, value)) = bytes_finished {
3746 0 : bytes_finished_metric.inc_by(*value);
3747 23240 : }
3748 23240 : }
3749 : }
3750 :
3751 : /// The enum variants communicate to the [`RemoteTimelineClientMetrics`] whether to
3752 : /// track the byte size of this call in applicable metric(s).
3753 : pub(crate) enum RemoteTimelineClientMetricsCallTrackSize {
3754 : /// Do not account for this call's byte size in any metrics.
3755 : /// The `reason` field is there to make the call sites self-documenting
3756 : /// about why they don't need the metric.
3757 : DontTrackSize { reason: &'static str },
3758 : /// Track the byte size of the call in applicable metric(s).
3759 : Bytes(u64),
3760 : }
3761 :
3762 : impl RemoteTimelineClientMetrics {
3763 : /// Update the metrics that change when a call to the remote timeline client instance starts.
3764 : ///
3765 : /// Drop the returned guard object once the operation is finished to update the corresponding metrics that track completions.
3766 : /// Or, use [`RemoteTimelineClientCallMetricGuard::will_decrement_manually`] and [`call_end`](Self::call_end) if that
3767 : /// is more suitable.
3768 : /// Never do both.
3769 23240 : pub(crate) fn call_begin(
3770 23240 : &self,
3771 23240 : file_kind: &RemoteOpFileKind,
3772 23240 : op_kind: &RemoteOpKind,
3773 23240 : size: RemoteTimelineClientMetricsCallTrackSize,
3774 23240 : ) -> RemoteTimelineClientCallMetricGuard {
3775 23240 : let calls_counter_pair = self.calls_counter_pair(file_kind, op_kind);
3776 23240 : calls_counter_pair.inc();
3777 :
3778 23240 : let bytes_finished = match size {
3779 12644 : RemoteTimelineClientMetricsCallTrackSize::DontTrackSize { reason: _reason } => {
3780 12644 : // nothing to do
3781 12644 : None
3782 : }
3783 10596 : RemoteTimelineClientMetricsCallTrackSize::Bytes(size) => {
3784 10596 : self.bytes_started_counter(file_kind, op_kind).inc_by(size);
3785 10596 : let finished_counter = self.bytes_finished_counter(file_kind, op_kind);
3786 10596 : Some((finished_counter, size))
3787 : }
3788 : };
3789 23240 : RemoteTimelineClientCallMetricGuard {
3790 23240 : calls_counter_pair: Some(calls_counter_pair),
3791 23240 : bytes_finished,
3792 23240 : }
3793 23240 : }
3794 :
3795 : /// Manually update the metrics that track completions, instead of using the guard object.
3796 : /// Using the guard object is generally preferable.
3797 : /// See [`call_begin`](Self::call_begin) for more context.
3798 20271 : pub(crate) fn call_end(
3799 20271 : &self,
3800 20271 : file_kind: &RemoteOpFileKind,
3801 20271 : op_kind: &RemoteOpKind,
3802 20271 : size: RemoteTimelineClientMetricsCallTrackSize,
3803 20271 : ) {
3804 20271 : let calls_counter_pair = self.calls_counter_pair(file_kind, op_kind);
3805 20271 : calls_counter_pair.dec();
3806 20271 : match size {
3807 10866 : RemoteTimelineClientMetricsCallTrackSize::DontTrackSize { reason: _reason } => {}
3808 9405 : RemoteTimelineClientMetricsCallTrackSize::Bytes(size) => {
3809 9405 : self.bytes_finished_counter(file_kind, op_kind).inc_by(size);
3810 9405 : }
3811 : }
3812 20271 : }
3813 : }
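// Illustrative sketch, not part of the original source: the file/op kind variants and the
// byte count below are assumptions for illustration.
#[allow(dead_code)]
fn _sketch_call_metrics(metrics: &RemoteTimelineClientMetrics) {
    let guard = metrics.call_begin(
        &RemoteOpFileKind::Layer,
        &RemoteOpKind::Upload,
        RemoteTimelineClientMetricsCallTrackSize::Bytes(4096),
    );
    // ... perform the remote operation ...
    drop(guard); // decrements the calls pair and adds the 4096 bytes to bytes_finished
}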
3814 :
3815 : impl Drop for RemoteTimelineClientMetrics {
3816 120 : fn drop(&mut self) {
3817 120 : let RemoteTimelineClientMetrics {
3818 120 : tenant_id,
3819 120 : shard_id,
3820 120 : timeline_id,
3821 120 : remote_physical_size_gauge,
3822 120 : calls,
3823 120 : bytes_started_counter,
3824 120 : bytes_finished_counter,
3825 120 : projected_remote_consistent_lsn_gauge,
3826 120 : } = self;
3827 144 : for ((a, b), _) in calls.get_mut().unwrap().drain() {
3828 144 : let mut res = [Ok(()), Ok(())];
3829 144 : REMOTE_TIMELINE_CLIENT_CALLS
3830 144 : .remove_label_values(&mut res, &[tenant_id, shard_id, timeline_id, a, b]);
3831 144 : // don't care about results
3832 144 : }
3833 120 : for ((a, b), _) in bytes_started_counter.get_mut().unwrap().drain() {
3834 36 : let _ = REMOTE_TIMELINE_CLIENT_BYTES_STARTED_COUNTER.remove_label_values(&[
3835 36 : tenant_id,
3836 36 : shard_id,
3837 36 : timeline_id,
3838 36 : a,
3839 36 : b,
3840 36 : ]);
3841 36 : }
3842 120 : for ((a, b), _) in bytes_finished_counter.get_mut().unwrap().drain() {
3843 36 : let _ = REMOTE_TIMELINE_CLIENT_BYTES_FINISHED_COUNTER.remove_label_values(&[
3844 36 : tenant_id,
3845 36 : shard_id,
3846 36 : timeline_id,
3847 36 : a,
3848 36 : b,
3849 36 : ]);
3850 36 : }
3851 120 : {
3852 120 : let _ = remote_physical_size_gauge; // use to avoid 'unused' warning in desctructuring above
3853 120 : let _ = REMOTE_PHYSICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
3854 120 : }
3855 120 : {
3856 120 : let _ = projected_remote_consistent_lsn_gauge;
3857 120 : let _ = PROJECTED_REMOTE_CONSISTENT_LSN.remove_label_values(&[
3858 120 : tenant_id,
3859 120 : shard_id,
3860 120 : timeline_id,
3861 120 : ]);
3862 120 : }
3863 120 : }
3864 : }
3865 :
3866 : /// Wrapper future that measures the time spent by a remote storage operation,
3867 : /// and records the time and success/failure as a prometheus metric.
3868 : pub(crate) trait MeasureRemoteOp<O, E>: Sized + Future<Output = Result<O, E>> {
3869 19442 : async fn measure_remote_op(
3870 19442 : self,
3871 19442 : task_kind: Option<TaskKind>, // not all caller contexts have a RequestContext / TaskKind handy
3872 19442 : file_kind: RemoteOpFileKind,
3873 19442 : op: RemoteOpKind,
3874 19442 : metrics: Arc<RemoteTimelineClientMetrics>,
3875 19442 : ) -> Result<O, E> {
3876 19442 : let start = Instant::now();
3877 19442 : let res = self.await;
3878 18538 : let duration = start.elapsed();
3879 18538 : let status = if res.is_ok() { &"success" } else { &"failure" };
3880 18538 : metrics
3881 18538 : .remote_operation_time(task_kind, &file_kind, &op, status)
3882 18538 : .observe(duration.as_secs_f64());
3883 18538 : res
3884 18538 : }
3885 : }
3886 :
3887 : impl<Fut, O, E> MeasureRemoteOp<O, E> for Fut where Fut: Sized + Future<Output = Result<O, E>> {}
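// Illustrative sketch, not part of the original source: any future returning a Result can
// be wrapped; the trivial inner future and the op/file kind variants here are assumptions.
#[allow(dead_code)]
async fn _sketch_measure_remote_op(
    metrics: Arc<RemoteTimelineClientMetrics>,
) -> Result<(), std::io::Error> {
    async { Ok::<(), std::io::Error>(()) }
        .measure_remote_op(
            None, // no RequestContext / TaskKind handy at this call site
            RemoteOpFileKind::Layer,
            RemoteOpKind::Upload,
            metrics,
        )
        .await
}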
3888 :
3889 : pub mod tokio_epoll_uring {
3890 : use std::collections::HashMap;
3891 : use std::sync::{Arc, Mutex};
3892 :
3893 : use metrics::{Histogram, LocalHistogram, UIntGauge, register_histogram, register_int_counter};
3894 : use once_cell::sync::Lazy;
3895 :
3896 : /// Shared storage for tokio-epoll-uring thread local metrics.
3897 : pub(crate) static THREAD_LOCAL_METRICS_STORAGE: Lazy<ThreadLocalMetricsStorage> =
3898 738 : Lazy::new(|| {
3899 738 : let slots_submission_queue_depth = register_histogram!(
3900 738 : "pageserver_tokio_epoll_uring_slots_submission_queue_depth",
3901 738 : "The slots waiters queue depth of each tokio_epoll_uring system",
3902 738 : vec![
3903 738 : 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0
3904 738 : ],
3905 738 : )
3906 738 : .expect("failed to define a metric");
3907 738 : ThreadLocalMetricsStorage {
3908 738 : observers: Mutex::new(HashMap::new()),
3909 738 : slots_submission_queue_depth,
3910 738 : }
3911 738 : });
3912 :
3913 : pub struct ThreadLocalMetricsStorage {
3914 : /// List of thread local metrics observers.
3915 : observers: Mutex<HashMap<u64, Arc<ThreadLocalMetrics>>>,
3916 : /// A histogram shared between all thread local systems
3917 : /// for collecting slots submission queue depth.
3918 : slots_submission_queue_depth: Histogram,
3919 : }
3920 :
3921 : /// Each thread-local [`tokio_epoll_uring::System`] gets one of these as its
3922 : /// [`tokio_epoll_uring::metrics::PerSystemMetrics`] generic.
3923 : ///
3924 : /// The System makes observations into [`Self`] and periodically, the collector
3925 : /// comes along and flushes [`Self`] into the shared storage [`THREAD_LOCAL_METRICS_STORAGE`].
3926 : ///
3927 : /// [`LocalHistogram`] is `!Send`, so we need to put it behind a [`Mutex`].
3928 : /// But except for the periodic flush, the lock is uncontended, so there's no waiting
3929 : /// for the cache coherence protocol to get an exclusive cache line.
3930 : pub struct ThreadLocalMetrics {
3931 : /// Local observer of thread local tokio-epoll-uring system's slots waiters queue depth.
3932 : slots_submission_queue_depth: Mutex<LocalHistogram>,
3933 : }
3934 :
3935 : impl ThreadLocalMetricsStorage {
3936 : /// Registers a new thread local system. Returns a thread local metrics observer.
3937 3466 : pub fn register_system(&self, id: u64) -> Arc<ThreadLocalMetrics> {
3938 3466 : let per_system_metrics = Arc::new(ThreadLocalMetrics::new(
3939 3466 : self.slots_submission_queue_depth.local(),
3940 3466 : ));
3941 3466 : let mut g = self.observers.lock().unwrap();
3942 3466 : g.insert(id, Arc::clone(&per_system_metrics));
3943 3466 : per_system_metrics
3944 3466 : }
3945 :
3946 : /// Removes metrics observer for a thread local system.
3947 : /// This should be called before dropping a thread local system.
3948 738 : pub fn remove_system(&self, id: u64) {
3949 738 : let mut g = self.observers.lock().unwrap();
3950 738 : g.remove(&id);
3951 738 : }
3952 :
3953 : /// Flush all thread local metrics to the shared storage.
3954 0 : pub fn flush_thread_local_metrics(&self) {
3955 0 : let g = self.observers.lock().unwrap();
3956 0 : g.values().for_each(|local| {
3957 0 : local.flush();
3958 0 : });
3959 0 : }
3960 : }
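// Illustrative sketch, not part of the original source: each thread-local system registers
// itself under a unique id and unregisters before it is dropped.
#[allow(dead_code)]
fn _sketch_thread_local_registration(system_id: u64) {
    let observer = THREAD_LOCAL_METRICS_STORAGE.register_system(system_id);
    // ... the system records into `observer` while it runs ...
    drop(observer);
    THREAD_LOCAL_METRICS_STORAGE.remove_system(system_id);
}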
3961 :
3962 : impl ThreadLocalMetrics {
3963 3466 : pub fn new(slots_submission_queue_depth: LocalHistogram) -> Self {
3964 3466 : ThreadLocalMetrics {
3965 3466 : slots_submission_queue_depth: Mutex::new(slots_submission_queue_depth),
3966 3466 : }
3967 3466 : }
3968 :
3969 : /// Flushes the thread local metrics to shared aggregator.
3970 0 : pub fn flush(&self) {
3971 0 : let Self {
3972 0 : slots_submission_queue_depth,
3973 0 : } = self;
3974 0 : slots_submission_queue_depth.lock().unwrap().flush();
3975 0 : }
3976 : }
3977 :
3978 : impl tokio_epoll_uring::metrics::PerSystemMetrics for ThreadLocalMetrics {
3979 2358536 : fn observe_slots_submission_queue_depth(&self, queue_depth: u64) {
3980 2358536 : let Self {
3981 2358536 : slots_submission_queue_depth,
3982 2358536 : } = self;
3983 2358536 : slots_submission_queue_depth
3984 2358536 : .lock()
3985 2358536 : .unwrap()
3986 2358536 : .observe(queue_depth as f64);
3987 2358536 : }
3988 : }
3989 :
3990 : pub struct Collector {
3991 : descs: Vec<metrics::core::Desc>,
3992 : systems_created: UIntGauge,
3993 : systems_destroyed: UIntGauge,
3994 : thread_local_metrics_storage: &'static ThreadLocalMetricsStorage,
3995 : }
3996 :
3997 : impl metrics::core::Collector for Collector {
3998 0 : fn desc(&self) -> Vec<&metrics::core::Desc> {
3999 0 : self.descs.iter().collect()
4000 0 : }
4001 :
4002 0 : fn collect(&self) -> Vec<metrics::proto::MetricFamily> {
4003 0 : let mut mfs = Vec::with_capacity(Self::NMETRICS);
4004 0 : let tokio_epoll_uring::metrics::GlobalMetrics {
4005 0 : systems_created,
4006 0 : systems_destroyed,
4007 0 : } = tokio_epoll_uring::metrics::global();
4008 0 : self.systems_created.set(systems_created);
4009 0 : mfs.extend(self.systems_created.collect());
4010 0 : self.systems_destroyed.set(systems_destroyed);
4011 0 : mfs.extend(self.systems_destroyed.collect());
4012 0 :
4013 0 : self.thread_local_metrics_storage
4014 0 : .flush_thread_local_metrics();
4015 0 :
4016 0 : mfs.extend(
4017 0 : self.thread_local_metrics_storage
4018 0 : .slots_submission_queue_depth
4019 0 : .collect(),
4020 0 : );
4021 0 : mfs
4022 0 : }
4023 : }
4024 :
4025 : impl Collector {
4026 : const NMETRICS: usize = 3;
4027 :
4028 : #[allow(clippy::new_without_default)]
4029 0 : pub fn new() -> Self {
4030 0 : let mut descs = Vec::new();
4031 0 :
4032 0 : let systems_created = UIntGauge::new(
4033 0 : "pageserver_tokio_epoll_uring_systems_created",
4034 0 : "counter of tokio-epoll-uring systems that were created",
4035 0 : )
4036 0 : .unwrap();
4037 0 : descs.extend(
4038 0 : metrics::core::Collector::desc(&systems_created)
4039 0 : .into_iter()
4040 0 : .cloned(),
4041 0 : );
4042 0 :
4043 0 : let systems_destroyed = UIntGauge::new(
4044 0 : "pageserver_tokio_epoll_uring_systems_destroyed",
4045 0 : "counter of tokio-epoll-uring systems that were destroyed",
4046 0 : )
4047 0 : .unwrap();
4048 0 : descs.extend(
4049 0 : metrics::core::Collector::desc(&systems_destroyed)
4050 0 : .into_iter()
4051 0 : .cloned(),
4052 0 : );
4053 0 :
4054 0 : Self {
4055 0 : descs,
4056 0 : systems_created,
4057 0 : systems_destroyed,
4058 0 : thread_local_metrics_storage: &THREAD_LOCAL_METRICS_STORAGE,
4059 0 : }
4060 0 : }
4061 : }
4062 :
4063 738 : pub(crate) static THREAD_LOCAL_LAUNCH_SUCCESSES: Lazy<metrics::IntCounter> = Lazy::new(|| {
4064 738 : register_int_counter!(
4065 738 : "pageserver_tokio_epoll_uring_pageserver_thread_local_launch_success_count",
4066 738 : "Number of times thread_local_system creation spanned multiple executor threads",
4067 738 : )
4068 738 : .unwrap()
4069 738 : });
4070 :
4071 0 : pub(crate) static THREAD_LOCAL_LAUNCH_FAILURES: Lazy<metrics::IntCounter> = Lazy::new(|| {
4072 0 : register_int_counter!(
4073 0 : "pageserver_tokio_epoll_uring_pageserver_thread_local_launch_failures_count",
4074 0 : "Number of times thread_local_system creation failed and was retried after back-off.",
4075 0 : )
4076 0 : .unwrap()
4077 0 : });
4078 : }
4079 :
4080 : pub(crate) struct GlobalAndPerTenantIntCounter {
4081 : global: IntCounter,
4082 : per_tenant: IntCounter,
4083 : }
4084 :
4085 : impl GlobalAndPerTenantIntCounter {
4086 : #[inline(always)]
4087 0 : pub(crate) fn inc(&self) {
4088 0 : self.inc_by(1)
4089 0 : }
4090 : #[inline(always)]
4091 1353683 : pub(crate) fn inc_by(&self, n: u64) {
4092 1353683 : self.global.inc_by(n);
4093 1353683 : self.per_tenant.inc_by(n);
4094 1353683 : }
4095 : }
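// Sketch, not part of the original source: a single call advances both the global and the
// per-tenant series, e.g. for TimelineMetrics::wait_lsn_in_progress_micros.
#[allow(dead_code)]
fn _sketch_dual_counter(counter: &GlobalAndPerTenantIntCounter) {
    counter.inc_by(42);
}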
4096 :
4097 : pub(crate) mod tenant_throttling {
4098 : use metrics::register_int_counter_vec;
4099 : use once_cell::sync::Lazy;
4100 : use utils::shard::TenantShardId;
4101 :
4102 : use super::GlobalAndPerTenantIntCounter;
4103 :
4104 : pub(crate) struct Metrics<const KIND: usize> {
4105 : pub(super) count_accounted_start: GlobalAndPerTenantIntCounter,
4106 : pub(super) count_accounted_finish: GlobalAndPerTenantIntCounter,
4107 : pub(super) wait_time: GlobalAndPerTenantIntCounter,
4108 : pub(super) count_throttled: GlobalAndPerTenantIntCounter,
4109 : }
4110 :
4111 1296 : static COUNT_ACCOUNTED_START: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
4112 1296 : register_int_counter_vec!(
4113 1296 : "pageserver_tenant_throttling_count_accounted_start_global",
4114 1296 : "Count of tenant throttling starts, by kind of throttle.",
4115 1296 : &["kind"]
4116 1296 : )
4117 1296 : .unwrap()
4118 1296 : });
4119 1296 : static COUNT_ACCOUNTED_START_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
4120 1296 : register_int_counter_vec!(
4121 1296 : "pageserver_tenant_throttling_count_accounted_start",
4122 1296 : "Count of tenant throttling starts, by kind of throttle.",
4123 1296 : &["kind", "tenant_id", "shard_id"]
4124 1296 : )
4125 1296 : .unwrap()
4126 1296 : });
4127 1296 : static COUNT_ACCOUNTED_FINISH: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
4128 1296 : register_int_counter_vec!(
4129 1296 : "pageserver_tenant_throttling_count_accounted_finish_global",
4130 1296 : "Count of tenant throttling finishes, by kind of throttle.",
4131 1296 : &["kind"]
4132 1296 : )
4133 1296 : .unwrap()
4134 1296 : });
4135 1296 : static COUNT_ACCOUNTED_FINISH_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
4136 1296 : register_int_counter_vec!(
4137 1296 : "pageserver_tenant_throttling_count_accounted_finish",
4138 1296 : "Count of tenant throttling finishes, by kind of throttle.",
4139 1296 : &["kind", "tenant_id", "shard_id"]
4140 1296 : )
4141 1296 : .unwrap()
4142 1296 : });
4143 1296 : static WAIT_USECS: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
4144 1296 : register_int_counter_vec!(
4145 1296 : "pageserver_tenant_throttling_wait_usecs_sum_global",
4146 1296 : "Sum of microseconds spent waiting on the throttle, by kind of throttle.",
4147 1296 : &["kind"]
4148 1296 : )
4149 1296 : .unwrap()
4150 1296 : });
4151 1296 : static WAIT_USECS_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
4152 1296 : register_int_counter_vec!(
4153 1296 : "pageserver_tenant_throttling_wait_usecs_sum",
4154 1296 : "Sum of microseconds spent waiting on the throttle, by kind of throttle.",
4155 1296 : &["kind", "tenant_id", "shard_id"]
4156 1296 : )
4157 1296 : .unwrap()
4158 1296 : });
4159 :
4160 1296 : static WAIT_COUNT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
4161 1296 : register_int_counter_vec!(
4162 1296 : "pageserver_tenant_throttling_count_global",
4163 1296 : "Count of tenant throttlings, by kind of throttle.",
4164 1296 : &["kind"]
4165 1296 : )
4166 1296 : .unwrap()
4167 1296 : });
4168 1296 : static WAIT_COUNT_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
4169 1296 : register_int_counter_vec!(
4170 1296 : "pageserver_tenant_throttling_count",
4171 1296 : "Count of tenant throttlings, by kind of throttle.",
4172 1296 : &["kind", "tenant_id", "shard_id"]
4173 1296 : )
4174 1296 : .unwrap()
4175 1296 : });
4176 :
4177 : const KINDS: &[&str] = &["pagestream"];
4178 : pub type Pagestream = Metrics<0>;
4179 :
4180 : impl<const KIND: usize> Metrics<KIND> {
4181 1404 : pub(crate) fn new(tenant_shard_id: &TenantShardId) -> Self {
4182 1404 : let per_tenant_label_values = &[
4183 1404 : KINDS[KIND],
4184 1404 : &tenant_shard_id.tenant_id.to_string(),
4185 1404 : &tenant_shard_id.shard_slug().to_string(),
4186 1404 : ];
4187 1404 : Metrics {
4188 1404 : count_accounted_start: {
4189 1404 : GlobalAndPerTenantIntCounter {
4190 1404 : global: COUNT_ACCOUNTED_START.with_label_values(&[KINDS[KIND]]),
4191 1404 : per_tenant: COUNT_ACCOUNTED_START_PER_TENANT
4192 1404 : .with_label_values(per_tenant_label_values),
4193 1404 : }
4194 1404 : },
4195 1404 : count_accounted_finish: {
4196 1404 : GlobalAndPerTenantIntCounter {
4197 1404 : global: COUNT_ACCOUNTED_FINISH.with_label_values(&[KINDS[KIND]]),
4198 1404 : per_tenant: COUNT_ACCOUNTED_FINISH_PER_TENANT
4199 1404 : .with_label_values(per_tenant_label_values),
4200 1404 : }
4201 1404 : },
4202 1404 : wait_time: {
4203 1404 : GlobalAndPerTenantIntCounter {
4204 1404 : global: WAIT_USECS.with_label_values(&[KINDS[KIND]]),
4205 1404 : per_tenant: WAIT_USECS_PER_TENANT
4206 1404 : .with_label_values(per_tenant_label_values),
4207 1404 : }
4208 1404 : },
4209 1404 : count_throttled: {
4210 1404 : GlobalAndPerTenantIntCounter {
4211 1404 : global: WAIT_COUNT.with_label_values(&[KINDS[KIND]]),
4212 1404 : per_tenant: WAIT_COUNT_PER_TENANT
4213 1404 : .with_label_values(per_tenant_label_values),
4214 1404 : }
4215 1404 : },
4216 1404 : }
4217 1404 : }
4218 : }
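// Illustrative sketch, not part of the original source: the const generic bakes the label
// in at compile time, so `Pagestream::new` (i.e. `Metrics::<0>::new`) resolves
// KINDS[0] == "pagestream" for all four counters; the pub(super) fields are then driven
// from within crate::metrics.
#[allow(dead_code)]
fn _sketch_pagestream_metrics(tenant_shard_id: &TenantShardId) {
    let m = Pagestream::new(tenant_shard_id);
    m.count_throttled.inc();
}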
4219 :
4220 0 : pub(crate) fn preinitialize_global_metrics() {
4221 0 : Lazy::force(&COUNT_ACCOUNTED_START);
4222 0 : Lazy::force(&COUNT_ACCOUNTED_FINISH);
4223 0 : Lazy::force(&WAIT_USECS);
4224 0 : Lazy::force(&WAIT_COUNT);
4225 0 : }
4226 :
4227 36 : pub(crate) fn remove_tenant_metrics(tenant_shard_id: &TenantShardId) {
4228 144 : for m in &[
4229 36 : &COUNT_ACCOUNTED_START_PER_TENANT,
4230 36 : &COUNT_ACCOUNTED_FINISH_PER_TENANT,
4231 36 : &WAIT_USECS_PER_TENANT,
4232 36 : &WAIT_COUNT_PER_TENANT,
4233 36 : ] {
4234 288 : for kind in KINDS {
4235 144 : let _ = m.remove_label_values(&[
4236 144 : kind,
4237 144 : &tenant_shard_id.tenant_id.to_string(),
4238 144 : &tenant_shard_id.shard_slug().to_string(),
4239 144 : ]);
4240 144 : }
4241 : }
4242 36 : }
4243 : }
4244 :
4245 : pub(crate) mod disk_usage_based_eviction {
4246 : use super::*;
4247 :
4248 : pub(crate) struct Metrics {
4249 : pub(crate) tenant_collection_time: Histogram,
4250 : pub(crate) tenant_layer_count: Histogram,
4251 : pub(crate) layers_collected: IntCounter,
4252 : pub(crate) layers_selected: IntCounter,
4253 : pub(crate) layers_evicted: IntCounter,
4254 : }
4255 :
4256 : impl Default for Metrics {
4257 0 : fn default() -> Self {
4258 0 : let tenant_collection_time = register_histogram!(
4259 0 : "pageserver_disk_usage_based_eviction_tenant_collection_seconds",
4260 0 : "Time spent collecting layers from a tenant -- not normalized by collected layer amount",
4261 0 : vec![0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1.0, 5.0, 10.0]
4262 0 : )
4263 0 : .unwrap();
4264 0 :
4265 0 : let tenant_layer_count = register_histogram!(
4266 0 : "pageserver_disk_usage_based_eviction_tenant_collected_layers",
4267 0 : "Amount of layers gathered from a tenant",
4268 0 : vec![5.0, 50.0, 500.0, 5000.0, 50000.0]
4269 0 : )
4270 0 : .unwrap();
4271 0 :
4272 0 : let layers_collected = register_int_counter!(
4273 0 : "pageserver_disk_usage_based_eviction_collected_layers_total",
4274 0 : "Amount of layers collected"
4275 0 : )
4276 0 : .unwrap();
4277 0 :
4278 0 : let layers_selected = register_int_counter!(
4279 0 : "pageserver_disk_usage_based_eviction_select_layers_total",
4280 0 : "Amount of layers selected"
4281 0 : )
4282 0 : .unwrap();
4283 0 :
4284 0 : let layers_evicted = register_int_counter!(
4285 0 : "pageserver_disk_usage_based_eviction_evicted_layers_total",
4286 0 : "Amount of layers successfully evicted"
4287 0 : )
4288 0 : .unwrap();
4289 0 :
4290 0 : Self {
4291 0 : tenant_collection_time,
4292 0 : tenant_layer_count,
4293 0 : layers_collected,
4294 0 : layers_selected,
4295 0 : layers_evicted,
4296 0 : }
4297 0 : }
4298 : }
4299 :
4300 : pub(crate) static METRICS: Lazy<Metrics> = Lazy::new(Metrics::default);
4301 : }
4302 :
4303 1260 : static TOKIO_EXECUTOR_THREAD_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
4304 1260 : register_uint_gauge_vec!(
4305 1260 : "pageserver_tokio_executor_thread_configured_count",
4306 1260 : "Total number of configured tokio executor threads in the process.
4307 1260 : The `setup` label denotes whether we're running with multiple runtimes or a single runtime.",
4308 1260 : &["setup"],
4309 1260 : )
4310 1260 : .unwrap()
4311 1260 : });
4312 :
4313 1260 : pub(crate) fn set_tokio_runtime_setup(setup: &str, num_threads: NonZeroUsize) {
4314 : static SERIALIZE: std::sync::Mutex<()> = std::sync::Mutex::new(());
4315 1260 : let _guard = SERIALIZE.lock().unwrap();
4316 1260 : TOKIO_EXECUTOR_THREAD_COUNT.reset();
4317 1260 : TOKIO_EXECUTOR_THREAD_COUNT
4318 1260 : .get_metric_with_label_values(&[setup])
4319 1260 : .unwrap()
4320 1260 : .set(u64::try_from(num_threads.get()).unwrap());
4321 1260 : }
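// Illustrative sketch, not part of the original source: the setup label and thread count
// below are made-up values. Note that the function resets the gauge vec first, so only the
// most recently reported setup label carries a value.
#[allow(dead_code)]
fn _sketch_tokio_runtime_setup() {
    set_tokio_runtime_setup("multiple-runtimes", NonZeroUsize::new(8).unwrap());
}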
4322 :
4323 0 : static PAGESERVER_CONFIG_IGNORED_ITEMS: Lazy<UIntGaugeVec> = Lazy::new(|| {
4324 0 : register_uint_gauge_vec!(
4325 0 : "pageserver_config_ignored_items",
4326 0 : "TOML items present in the on-disk configuration file but ignored by the pageserver config parser. \
4327 0 : The `item` label is the dot-separated path of the ignored item in the on-disk configuration file. \
4328 0 : The value for an unknown config item is always 1. \
4329 0 : There is a special label value \"\", which is 0, so that there is always a metric exposed (simplifies dashboards).",
4330 0 : &["item"]
4331 0 : )
4332 0 : .unwrap()
4333 0 : });
4334 :
4335 0 : pub fn preinitialize_metrics(
4336 0 : conf: &'static PageServerConf,
4337 0 : ignored: config::ignored_fields::Paths,
4338 0 : ) {
4339 0 : set_page_service_config_max_batch_size(&conf.page_service_pipelining);
4340 0 :
4341 0 : PAGESERVER_CONFIG_IGNORED_ITEMS
4342 0 : .with_label_values(&[""])
4343 0 : .set(0);
4344 0 : for path in &ignored.paths {
4345 0 : PAGESERVER_CONFIG_IGNORED_ITEMS
4346 0 : .with_label_values(&[path])
4347 0 : .set(1);
4348 0 : }
4349 :
4350 : // Python tests need these and on some we do alerting.
4351 : //
4352 : // FIXME(4813): make it so that we have no top level metrics as this fn will easily fall out of
4353 : // order:
4354 : // - global metrics reside in a Lazy<PageserverMetrics>
4355 : // - access via crate::metrics::PS_METRICS.some_metric.inc()
4356 : // - could move the statics into TimelineMetrics::new()?
4357 :
4358 : // counters
4359 0 : [
4360 0 : &UNEXPECTED_ONDEMAND_DOWNLOADS,
4361 0 : &WALRECEIVER_STARTED_CONNECTIONS,
4362 0 : &WALRECEIVER_BROKER_UPDATES,
4363 0 : &WALRECEIVER_CANDIDATES_ADDED,
4364 0 : &WALRECEIVER_CANDIDATES_REMOVED,
4365 0 : &tokio_epoll_uring::THREAD_LOCAL_LAUNCH_FAILURES,
4366 0 : &tokio_epoll_uring::THREAD_LOCAL_LAUNCH_SUCCESSES,
4367 0 : &REMOTE_ONDEMAND_DOWNLOADED_LAYERS,
4368 0 : &REMOTE_ONDEMAND_DOWNLOADED_BYTES,
4369 0 : &CIRCUIT_BREAKERS_BROKEN,
4370 0 : &CIRCUIT_BREAKERS_UNBROKEN,
4371 0 : &PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS_GLOBAL,
4372 0 : &WAIT_LSN_IN_PROGRESS_GLOBAL_MICROS,
4373 0 : ]
4374 0 : .into_iter()
4375 0 : .for_each(|c| {
4376 0 : Lazy::force(c);
4377 0 : });
4378 0 :
4379 0 : // Deletion queue stats
4380 0 : Lazy::force(&DELETION_QUEUE);
4381 0 :
4382 0 : // Tenant stats
4383 0 : Lazy::force(&TENANT);
4384 0 :
4385 0 : // Tenant manager stats
4386 0 : Lazy::force(&TENANT_MANAGER);
4387 0 :
4388 0 : Lazy::force(&crate::tenant::storage_layer::layer::LAYER_IMPL_METRICS);
4389 0 : Lazy::force(&disk_usage_based_eviction::METRICS);
4390 :
4391 0 : for state_name in pageserver_api::models::TenantState::VARIANTS {
4392 0 : // initialize the metric for all gauges, otherwise the time series might seemingly show
4393 0 : // values from last restart.
4394 0 : TENANT_STATE_METRIC.with_label_values(&[state_name]).set(0);
4395 0 : }
4396 :
4397 : // countervecs
4398 0 : [
4399 0 : &BACKGROUND_LOOP_PERIOD_OVERRUN_COUNT,
4400 0 : &SMGR_QUERY_STARTED_GLOBAL,
4401 0 : &PAGE_SERVICE_BATCH_BREAK_REASON_GLOBAL,
4402 0 : ]
4403 0 : .into_iter()
4404 0 : .for_each(|c| {
4405 0 : Lazy::force(c);
4406 0 : });
4407 0 :
4408 0 : // gauges
4409 0 : WALRECEIVER_ACTIVE_MANAGERS.get();
4410 0 :
4411 0 : // histograms
4412 0 : [
4413 0 : &LAYERS_PER_READ_GLOBAL,
4414 0 : &LAYERS_PER_READ_BATCH_GLOBAL,
4415 0 : &LAYERS_PER_READ_AMORTIZED_GLOBAL,
4416 0 : &DELTAS_PER_READ_GLOBAL,
4417 0 : &WAIT_LSN_TIME,
4418 0 : &WAL_REDO_TIME,
4419 0 : &WAL_REDO_RECORDS_HISTOGRAM,
4420 0 : &WAL_REDO_BYTES_HISTOGRAM,
4421 0 : &WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM,
4422 0 : &PAGE_SERVICE_BATCH_SIZE_GLOBAL,
4423 0 : &PAGE_SERVICE_SMGR_BATCH_WAIT_TIME_GLOBAL,
4424 0 : ]
4425 0 : .into_iter()
4426 0 : .for_each(|h| {
4427 0 : Lazy::force(h);
4428 0 : });
4429 0 :
4430 0 : // Custom
4431 0 : Lazy::force(&BASEBACKUP_QUERY_TIME);
4432 0 : Lazy::force(&COMPUTE_COMMANDS_COUNTERS);
4433 0 : Lazy::force(&tokio_epoll_uring::THREAD_LOCAL_METRICS_STORAGE);
4434 0 :
4435 0 : tenant_throttling::preinitialize_global_metrics();
4436 0 : wait_ondemand_download_time::preinitialize_global_metrics();
4437 0 : }
|