Line data Source code
1 : use enum_map::EnumMap;
2 : use metrics::{
3 : register_counter_vec, register_gauge_vec, register_histogram, register_histogram_vec,
4 : register_int_counter, register_int_counter_pair_vec, register_int_counter_vec,
5 : register_int_gauge, register_int_gauge_vec, register_uint_gauge, register_uint_gauge_vec,
6 : Counter, CounterVec, GaugeVec, Histogram, HistogramVec, IntCounter, IntCounterPair,
7 : IntCounterPairVec, IntCounterVec, IntGauge, IntGaugeVec, UIntGauge, UIntGaugeVec,
8 : };
9 : use once_cell::sync::Lazy;
10 : use pageserver_api::shard::TenantShardId;
11 : use strum::{EnumCount, VariantNames};
12 : use strum_macros::{IntoStaticStr, VariantNames};
13 : use tracing::warn;
14 : use utils::id::TimelineId;
15 :
16 : /// Prometheus histogram buckets (in seconds) for operations in the critical
17 : /// path. In other words, operations that directly affect that latency of user
18 : /// queries.
19 : ///
20 : /// The buckets capture the majority of latencies in the microsecond and
21 : /// millisecond range but also extend far enough up to distinguish "bad" from
22 : /// "really bad".
23 : const CRITICAL_OP_BUCKETS: &[f64] = &[
24 : 0.000_001, 0.000_010, 0.000_100, // 1 us, 10 us, 100 us
25 : 0.001_000, 0.010_000, 0.100_000, // 1 ms, 10 ms, 100 ms
26 : 1.0, 10.0, 100.0, // 1 s, 10 s, 100 s
27 : ];
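// A histogram registered with these buckets (e.g. the getpage metrics below) is usually
// summarized with a quantile query. Illustrative PromQL sketch, not part of this module;
// the metric name is taken from the registrations further down:
//
//   histogram_quantile(0.99, sum by (le) (rate(pageserver_getpage_reconstruct_seconds_bucket[5m])))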
28 :
29 : // Metrics collected on operations on the storage repository.
30 9984 : #[derive(Debug, VariantNames, IntoStaticStr)]
31 : #[strum(serialize_all = "kebab_case")]
32 : pub(crate) enum StorageTimeOperation {
33 : #[strum(serialize = "layer flush")]
34 : LayerFlush,
35 :
36 : #[strum(serialize = "compact")]
37 : Compact,
38 :
39 : #[strum(serialize = "create images")]
40 : CreateImages,
41 :
42 : #[strum(serialize = "logical size")]
43 : LogicalSize,
44 :
45 : #[strum(serialize = "imitate logical size")]
46 : ImitateLogicalSize,
47 :
48 : #[strum(serialize = "load layer map")]
49 : LoadLayerMap,
50 :
51 : #[strum(serialize = "gc")]
52 : Gc,
53 :
54 : #[strum(serialize = "find gc cutoffs")]
55 : FindGcCutoffs,
56 : }
57 :
58 516 : pub(crate) static STORAGE_TIME_SUM_PER_TIMELINE: Lazy<CounterVec> = Lazy::new(|| {
59 516 : register_counter_vec!(
60 516 : "pageserver_storage_operations_seconds_sum",
61 516 : "Total time spent on storage operations with operation, tenant and timeline dimensions",
62 516 : &["operation", "tenant_id", "shard_id", "timeline_id"],
63 516 : )
64 516 : .expect("failed to define a metric")
65 516 : });
66 :
67 516 : pub(crate) static STORAGE_TIME_COUNT_PER_TIMELINE: Lazy<IntCounterVec> = Lazy::new(|| {
68 516 : register_int_counter_vec!(
69 516 : "pageserver_storage_operations_seconds_count",
70 516 : "Count of storage operations with operation, tenant and timeline dimensions",
71 516 : &["operation", "tenant_id", "shard_id", "timeline_id"],
72 516 : )
73 516 : .expect("failed to define a metric")
74 516 : });
75 :
76 : // Buckets for background operations like compaction, GC, size calculation
77 : const STORAGE_OP_BUCKETS: &[f64] = &[0.010, 0.100, 1.0, 10.0, 100.0, 1000.0];
78 :
79 516 : pub(crate) static STORAGE_TIME_GLOBAL: Lazy<HistogramVec> = Lazy::new(|| {
80 516 : register_histogram_vec!(
81 516 : "pageserver_storage_operations_seconds_global",
82 516 : "Time spent on storage operations",
83 516 : &["operation"],
84 516 : STORAGE_OP_BUCKETS.into(),
85 516 : )
86 516 : .expect("failed to define a metric")
87 516 : });
88 :
89 0 : pub(crate) static READ_NUM_LAYERS_VISITED: Lazy<Histogram> = Lazy::new(|| {
90 0 : register_histogram!(
91 0 : "pageserver_layers_visited_per_read_global",
92 0 : "Number of layers visited to reconstruct one key",
93 0 : vec![1.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0],
94 0 : )
95 0 : .expect("failed to define a metric")
96 0 : });
97 :
98 504 : pub(crate) static VEC_READ_NUM_LAYERS_VISITED: Lazy<Histogram> = Lazy::new(|| {
99 504 : register_histogram!(
100 504 : "pageserver_layers_visited_per_vectored_read_global",
101 504 : "Average number of layers visited to reconstruct one key",
102 504 : vec![1.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0],
103 504 : )
104 504 : .expect("failed to define a metric")
105 504 : });
106 :
107 : // Breakdown of read ("get") operations, used to label the read-path metrics below.
108 : #[derive(
109 2040 : Clone, Copy, enum_map::Enum, strum_macros::EnumString, strum_macros::Display, IntoStaticStr,
110 : )]
111 : pub(crate) enum GetKind {
112 : Singular,
113 : Vectored,
114 : }
115 :
116 : pub(crate) struct ReconstructTimeMetrics {
117 : singular: Histogram,
118 : vectored: Histogram,
119 : }
120 :
121 510 : pub(crate) static RECONSTRUCT_TIME: Lazy<ReconstructTimeMetrics> = Lazy::new(|| {
122 510 : let inner = register_histogram_vec!(
123 510 : "pageserver_getpage_reconstruct_seconds",
124 510 : "Time spent in reconstruct_value (reconstruct a page from deltas)",
125 510 : &["get_kind"],
126 510 : CRITICAL_OP_BUCKETS.into(),
127 510 : )
128 510 : .expect("failed to define a metric");
129 510 :
130 510 : ReconstructTimeMetrics {
131 510 : singular: inner.with_label_values(&[GetKind::Singular.into()]),
132 510 : vectored: inner.with_label_values(&[GetKind::Vectored.into()]),
133 510 : }
134 510 : });
135 :
136 : impl ReconstructTimeMetrics {
137 1880459 : pub(crate) fn for_get_kind(&self, get_kind: GetKind) -> &Histogram {
138 1880459 : match get_kind {
139 1879115 : GetKind::Singular => &self.singular,
140 1344 : GetKind::Vectored => &self.vectored,
141 : }
142 1880459 : }
143 : }
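// Illustrative call-site sketch (hypothetical; `elapsed` is assumed to be measured by the
// caller around reconstruct_value):
//
//   RECONSTRUCT_TIME
//       .for_get_kind(GetKind::Singular)
//       .observe(elapsed.as_secs_f64());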
144 :
145 : pub(crate) struct ReconstructDataTimeMetrics {
146 : singular: Histogram,
147 : vectored: Histogram,
148 : }
149 :
150 : impl ReconstructDataTimeMetrics {
151 1880507 : pub(crate) fn for_get_kind(&self, get_kind: GetKind) -> &Histogram {
152 1880507 : match get_kind {
153 1879163 : GetKind::Singular => &self.singular,
154 1344 : GetKind::Vectored => &self.vectored,
155 : }
156 1880507 : }
157 : }
158 :
159 510 : pub(crate) static GET_RECONSTRUCT_DATA_TIME: Lazy<ReconstructDataTimeMetrics> = Lazy::new(|| {
160 510 : let inner = register_histogram_vec!(
161 510 : "pageserver_getpage_get_reconstruct_data_seconds",
162 510 : "Time spent in get_reconstruct_value_data",
163 510 : &["get_kind"],
164 510 : CRITICAL_OP_BUCKETS.into(),
165 510 : )
166 510 : .expect("failed to define a metric");
167 510 :
168 510 : ReconstructDataTimeMetrics {
169 510 : singular: inner.with_label_values(&[GetKind::Singular.into()]),
170 510 : vectored: inner.with_label_values(&[GetKind::Vectored.into()]),
171 510 : }
172 510 : });
173 :
174 : pub(crate) struct GetVectoredLatency {
175 : map: EnumMap<TaskKind, Option<Histogram>>,
176 : }
177 :
178 : #[allow(dead_code)]
179 : pub(crate) struct ScanLatency {
180 : map: EnumMap<TaskKind, Option<Histogram>>,
181 : }
182 :
183 : impl GetVectoredLatency {
184 : // Only these task types perform vectored gets. Filter all other tasks out to reduce total
185 : // cardinality of the metric.
186 : const TRACKED_TASK_KINDS: [TaskKind; 2] = [TaskKind::Compaction, TaskKind::PageRequestHandler];
187 :
188 3432 : pub(crate) fn for_task_kind(&self, task_kind: TaskKind) -> Option<&Histogram> {
189 3432 : self.map[task_kind].as_ref()
190 3432 : }
191 : }
192 :
193 : impl ScanLatency {
194 : // Only these task types perform scans. Filter all other tasks out to reduce total
195 : // cardinality of the metric.
196 : const TRACKED_TASK_KINDS: [TaskKind; 1] = [TaskKind::PageRequestHandler];
197 :
198 72 : pub(crate) fn for_task_kind(&self, task_kind: TaskKind) -> Option<&Histogram> {
199 72 : self.map[task_kind].as_ref()
200 72 : }
201 : }
202 :
203 : pub(crate) struct ScanLatencyOngoingRecording<'a> {
204 : parent: &'a Histogram,
205 : start: std::time::Instant,
206 : }
207 :
208 : impl<'a> ScanLatencyOngoingRecording<'a> {
209 0 : pub(crate) fn start_recording(parent: &'a Histogram) -> ScanLatencyOngoingRecording<'a> {
210 0 : let start = Instant::now();
211 0 : ScanLatencyOngoingRecording { parent, start }
212 0 : }
213 :
214 0 : pub(crate) fn observe(self, throttled: Option<Duration>) {
215 0 : let elapsed = self.start.elapsed();
216 0 : let ex_throttled = if let Some(throttled) = throttled {
217 0 : elapsed.checked_sub(throttled)
218 : } else {
219 0 : Some(elapsed)
220 : };
221 0 : if let Some(ex_throttled) = ex_throttled {
222 0 : self.parent.observe(ex_throttled.as_secs_f64());
223 0 : } else {
224 0 : use utils::rate_limit::RateLimit;
225 0 : static LOGGED: Lazy<Mutex<RateLimit>> =
226 0 : Lazy::new(|| Mutex::new(RateLimit::new(Duration::from_secs(10))));
227 0 : let mut rate_limit = LOGGED.lock().unwrap();
228 0 : rate_limit.call(|| {
229 0 : warn!("error deducting time spent throttled; this message is logged at a global rate limit");
230 0 : });
231 0 : }
232 0 : }
233 : }
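// Illustrative usage sketch (hypothetical caller; `histogram` and `time_spent_throttled`
// are assumed to be provided by the scan handler):
//
//   let recording = ScanLatencyOngoingRecording::start_recording(&histogram);
//   // ... perform the scan ...
//   recording.observe(Some(time_spent_throttled));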
234 :
235 492 : pub(crate) static GET_VECTORED_LATENCY: Lazy<GetVectoredLatency> = Lazy::new(|| {
236 492 : let inner = register_histogram_vec!(
237 492 : "pageserver_get_vectored_seconds",
238 492 : "Time spent in get_vectored, excluding time spent in timeline_get_throttle.",
239 492 : &["task_kind"],
240 492 : CRITICAL_OP_BUCKETS.into(),
241 492 : )
242 492 : .expect("failed to define a metric");
243 492 :
244 492 : GetVectoredLatency {
245 14760 : map: EnumMap::from_array(std::array::from_fn(|task_kind_idx| {
246 14760 : let task_kind = <TaskKind as enum_map::Enum>::from_usize(task_kind_idx);
247 14760 :
248 14760 : if GetVectoredLatency::TRACKED_TASK_KINDS.contains(&task_kind) {
249 984 : let task_kind = task_kind.into();
250 984 : Some(inner.with_label_values(&[task_kind]))
251 : } else {
252 13776 : None
253 : }
254 14760 : })),
255 492 : }
256 492 : });
257 :
258 24 : pub(crate) static SCAN_LATENCY: Lazy<ScanLatency> = Lazy::new(|| {
259 24 : let inner = register_histogram_vec!(
260 24 : "pageserver_scan_seconds",
261 24 : "Time spent in scan, excluding time spent in timeline_get_throttle.",
262 24 : &["task_kind"],
263 24 : CRITICAL_OP_BUCKETS.into(),
264 24 : )
265 24 : .expect("failed to define a metric");
266 24 :
267 24 : ScanLatency {
268 720 : map: EnumMap::from_array(std::array::from_fn(|task_kind_idx| {
269 720 : let task_kind = <TaskKind as enum_map::Enum>::from_usize(task_kind_idx);
270 720 :
271 720 : if ScanLatency::TRACKED_TASK_KINDS.contains(&task_kind) {
272 24 : let task_kind = task_kind.into();
273 24 : Some(inner.with_label_values(&[task_kind]))
274 : } else {
275 696 : None
276 : }
277 720 : })),
278 24 : }
279 24 : });
280 :
281 : pub(crate) struct PageCacheMetricsForTaskKind {
282 : pub read_accesses_immutable: IntCounter,
283 : pub read_hits_immutable: IntCounter,
284 : }
285 :
286 : pub(crate) struct PageCacheMetrics {
287 : map: EnumMap<TaskKind, EnumMap<PageContentKind, PageCacheMetricsForTaskKind>>,
288 : }
289 :
290 264 : static PAGE_CACHE_READ_HITS: Lazy<IntCounterVec> = Lazy::new(|| {
291 264 : register_int_counter_vec!(
292 264 : "pageserver_page_cache_read_hits_total",
293 264 : "Number of read accesses to the page cache that hit",
294 264 : &["task_kind", "key_kind", "content_kind", "hit_kind"]
295 264 : )
296 264 : .expect("failed to define a metric")
297 264 : });
298 :
299 264 : static PAGE_CACHE_READ_ACCESSES: Lazy<IntCounterVec> = Lazy::new(|| {
300 264 : register_int_counter_vec!(
301 264 : "pageserver_page_cache_read_accesses_total",
302 264 : "Number of read accesses to the page cache",
303 264 : &["task_kind", "key_kind", "content_kind"]
304 264 : )
305 264 : .expect("failed to define a metric")
306 264 : });
307 :
308 264 : pub(crate) static PAGE_CACHE: Lazy<PageCacheMetrics> = Lazy::new(|| PageCacheMetrics {
309 7920 : map: EnumMap::from_array(std::array::from_fn(|task_kind| {
310 7920 : let task_kind = <TaskKind as enum_map::Enum>::from_usize(task_kind);
311 7920 : let task_kind: &'static str = task_kind.into();
312 63360 : EnumMap::from_array(std::array::from_fn(|content_kind| {
313 63360 : let content_kind = <PageContentKind as enum_map::Enum>::from_usize(content_kind);
314 63360 : let content_kind: &'static str = content_kind.into();
315 63360 : PageCacheMetricsForTaskKind {
316 63360 : read_accesses_immutable: {
317 63360 : PAGE_CACHE_READ_ACCESSES
318 63360 : .get_metric_with_label_values(&[task_kind, "immutable", content_kind])
319 63360 : .unwrap()
320 63360 : },
321 63360 :
322 63360 : read_hits_immutable: {
323 63360 : PAGE_CACHE_READ_HITS
324 63360 : .get_metric_with_label_values(&[task_kind, "immutable", content_kind, "-"])
325 63360 : .unwrap()
326 63360 : },
327 63360 : }
328 63360 : }))
329 7920 : })),
330 264 : });
331 :
332 : impl PageCacheMetrics {
333 3302278 : pub(crate) fn for_ctx(&self, ctx: &RequestContext) -> &PageCacheMetricsForTaskKind {
334 3302278 : &self.map[ctx.task_kind()][ctx.page_content_kind()]
335 3302278 : }
336 : }
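// Illustrative call-site sketch (hypothetical; `ctx` and `hit` come from the page cache
// read path, which is not shown here):
//
//   let metrics = PAGE_CACHE.for_ctx(ctx);
//   metrics.read_accesses_immutable.inc();
//   if hit {
//       metrics.read_hits_immutable.inc();
//   }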
337 :
338 : pub(crate) struct PageCacheSizeMetrics {
339 : pub max_bytes: UIntGauge,
340 :
341 : pub current_bytes_immutable: UIntGauge,
342 : }
343 :
344 264 : static PAGE_CACHE_SIZE_CURRENT_BYTES: Lazy<UIntGaugeVec> = Lazy::new(|| {
345 264 : register_uint_gauge_vec!(
346 264 : "pageserver_page_cache_size_current_bytes",
347 264 : "Current size of the page cache in bytes, by key kind",
348 264 : &["key_kind"]
349 264 : )
350 264 : .expect("failed to define a metric")
351 264 : });
352 :
353 : pub(crate) static PAGE_CACHE_SIZE: Lazy<PageCacheSizeMetrics> =
354 264 : Lazy::new(|| PageCacheSizeMetrics {
355 264 : max_bytes: {
356 264 : register_uint_gauge!(
357 264 : "pageserver_page_cache_size_max_bytes",
358 264 : "Maximum size of the page cache in bytes"
359 264 : )
360 264 : .expect("failed to define a metric")
361 264 : },
362 264 : current_bytes_immutable: {
363 264 : PAGE_CACHE_SIZE_CURRENT_BYTES
364 264 : .get_metric_with_label_values(&["immutable"])
365 264 : .unwrap()
366 264 : },
367 264 : });
368 :
369 : pub(crate) mod page_cache_eviction_metrics {
370 : use std::num::NonZeroUsize;
371 :
372 : use metrics::{register_int_counter_vec, IntCounter, IntCounterVec};
373 : use once_cell::sync::Lazy;
374 :
375 : #[derive(Clone, Copy)]
376 : pub(crate) enum Outcome {
377 : FoundSlotUnused { iters: NonZeroUsize },
378 : FoundSlotEvicted { iters: NonZeroUsize },
379 : ItersExceeded { iters: NonZeroUsize },
380 : }
381 :
382 264 : static ITERS_TOTAL_VEC: Lazy<IntCounterVec> = Lazy::new(|| {
383 264 : register_int_counter_vec!(
384 264 : "pageserver_page_cache_find_victim_iters_total",
385 264 : "Counter for the number of iterations in the find_victim loop",
386 264 : &["outcome"],
387 264 : )
388 264 : .expect("failed to define a metric")
389 264 : });
390 :
391 264 : static CALLS_VEC: Lazy<IntCounterVec> = Lazy::new(|| {
392 264 : register_int_counter_vec!(
393 264 : "pageserver_page_cache_find_victim_calls",
394 264 : "Incremented at the end of each find_victim() call.\
395 264 : Filter by outcome to get e.g., eviction rate.",
396 264 : &["outcome"]
397 264 : )
398 264 : .unwrap()
399 264 : });
400 :
401 96615 : pub(crate) fn observe(outcome: Outcome) {
402 : macro_rules! dry {
403 : ($label:literal, $iters:expr) => {{
404 : static LABEL: &'static str = $label;
405 : static ITERS_TOTAL: Lazy<IntCounter> =
406 324 : Lazy::new(|| ITERS_TOTAL_VEC.with_label_values(&[LABEL]));
407 : static CALLS: Lazy<IntCounter> =
408 324 : Lazy::new(|| CALLS_VEC.with_label_values(&[LABEL]));
409 : ITERS_TOTAL.inc_by(($iters.get()) as u64);
410 : CALLS.inc();
411 : }};
412 : }
413 96615 : match outcome {
414 4452 : Outcome::FoundSlotUnused { iters } => dry!("found_empty", iters),
415 92163 : Outcome::FoundSlotEvicted { iters } => {
416 92163 : dry!("found_evicted", iters)
417 : }
418 0 : Outcome::ItersExceeded { iters } => {
419 0 : dry!("err_iters_exceeded", iters);
420 0 : super::page_cache_errors_inc(super::PageCacheErrorKind::EvictIterLimit);
421 0 : }
422 : }
423 96615 : }
424 : }
425 :
426 0 : static PAGE_CACHE_ERRORS: Lazy<IntCounterVec> = Lazy::new(|| {
427 0 : register_int_counter_vec!(
428 0 : "page_cache_errors_total",
429 0 : "Number of timeouts while acquiring a pinned slot in the page cache",
430 0 : &["error_kind"]
431 0 : )
432 0 : .expect("failed to define a metric")
433 0 : });
434 :
435 0 : #[derive(IntoStaticStr)]
436 : #[strum(serialize_all = "kebab_case")]
437 : pub(crate) enum PageCacheErrorKind {
438 : AcquirePinnedSlotTimeout,
439 : EvictIterLimit,
440 : }
441 :
442 0 : pub(crate) fn page_cache_errors_inc(error_kind: PageCacheErrorKind) {
443 0 : PAGE_CACHE_ERRORS
444 0 : .get_metric_with_label_values(&[error_kind.into()])
445 0 : .unwrap()
446 0 : .inc();
447 0 : }
448 :
449 54 : pub(crate) static WAIT_LSN_TIME: Lazy<Histogram> = Lazy::new(|| {
450 54 : register_histogram!(
451 54 : "pageserver_wait_lsn_seconds",
452 54 : "Time spent waiting for WAL to arrive",
453 54 : CRITICAL_OP_BUCKETS.into(),
454 54 : )
455 54 : .expect("failed to define a metric")
456 54 : });
457 :
458 516 : static LAST_RECORD_LSN: Lazy<IntGaugeVec> = Lazy::new(|| {
459 516 : register_int_gauge_vec!(
460 516 : "pageserver_last_record_lsn",
461 516 : "Last record LSN grouped by timeline",
462 516 : &["tenant_id", "shard_id", "timeline_id"]
463 516 : )
464 516 : .expect("failed to define a metric")
465 516 : });
466 :
467 516 : static PITR_HISTORY_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
468 516 : register_uint_gauge_vec!(
469 516 : "pageserver_pitr_history_size",
470 516 : "Data written since PITR cutoff on this timeline",
471 516 : &["tenant_id", "shard_id", "timeline_id"]
472 516 : )
473 516 : .expect("failed to define a metric")
474 516 : });
475 :
476 5088 : #[derive(strum_macros::EnumString, strum_macros::Display, strum_macros::IntoStaticStr)]
477 : #[strum(serialize_all = "kebab_case")]
478 : pub(crate) enum MetricLayerKind {
479 : Delta,
480 : Image,
481 : }
482 :
483 516 : static TIMELINE_LAYER_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
484 516 : register_uint_gauge_vec!(
485 516 : "pageserver_layer_bytes",
486 516 : "Sum of layer physical sizes in bytes",
487 516 : &["tenant_id", "shard_id", "timeline_id", "kind"]
488 516 : )
489 516 : .expect("failed to define a metric")
490 516 : });
491 :
492 516 : static TIMELINE_LAYER_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
493 516 : register_uint_gauge_vec!(
494 516 : "pageserver_layer_count",
495 516 : "Number of layers that exist",
496 516 : &["tenant_id", "shard_id", "timeline_id", "kind"]
497 516 : )
498 516 : .expect("failed to define a metric")
499 516 : });
500 :
501 516 : static TIMELINE_ARCHIVE_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
502 516 : register_uint_gauge_vec!(
503 516 : "pageserver_archive_size",
504 516 : "Timeline's logical size if it is considered eligible for archival (outside PITR window), else zero",
505 516 : &["tenant_id", "shard_id", "timeline_id"]
506 516 : )
507 516 : .expect("failed to define a metric")
508 516 : });
509 :
510 516 : static STANDBY_HORIZON: Lazy<IntGaugeVec> = Lazy::new(|| {
511 516 : register_int_gauge_vec!(
512 516 : "pageserver_standby_horizon",
513 516 : "Standby apply LSN for which GC is hold off, by timeline.",
514 516 : &["tenant_id", "shard_id", "timeline_id"]
515 516 : )
516 516 : .expect("failed to define a metric")
517 516 : });
518 :
519 516 : static RESIDENT_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
520 516 : register_uint_gauge_vec!(
521 516 : "pageserver_resident_physical_size",
522 516 : "The size of the layer files present in the pageserver's filesystem, for attached locations.",
523 516 : &["tenant_id", "shard_id", "timeline_id"]
524 516 : )
525 516 : .expect("failed to define a metric")
526 516 : });
527 :
528 516 : static VISIBLE_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
529 516 : register_uint_gauge_vec!(
530 516 : "pageserver_visible_physical_size",
531 516 : "The size of the layer files present in the pageserver's filesystem.",
532 516 : &["tenant_id", "shard_id", "timeline_id"]
533 516 : )
534 516 : .expect("failed to define a metric")
535 516 : });
536 :
537 504 : pub(crate) static RESIDENT_PHYSICAL_SIZE_GLOBAL: Lazy<UIntGauge> = Lazy::new(|| {
538 504 : register_uint_gauge!(
539 504 : "pageserver_resident_physical_size_global",
540 504 : "Like `pageserver_resident_physical_size`, but without tenant/timeline dimensions."
541 504 : )
542 504 : .expect("failed to define a metric")
543 504 : });
544 :
545 516 : static REMOTE_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
546 516 : register_uint_gauge_vec!(
547 516 : "pageserver_remote_physical_size",
548 516 : "The size of the layer files present in the remote storage that are listed in the remote index_part.json.",
549 516 : // Corollary: If any files are missing from the index part, they won't be included here.
550 516 : &["tenant_id", "shard_id", "timeline_id"]
551 516 : )
552 516 : .expect("failed to define a metric")
553 516 : });
554 :
555 516 : static REMOTE_PHYSICAL_SIZE_GLOBAL: Lazy<UIntGauge> = Lazy::new(|| {
556 516 : register_uint_gauge!(
557 516 : "pageserver_remote_physical_size_global",
558 516 : "Like `pageserver_remote_physical_size`, but without tenant/timeline dimensions."
559 516 : )
560 516 : .expect("failed to define a metric")
561 516 : });
562 :
563 12 : pub(crate) static REMOTE_ONDEMAND_DOWNLOADED_LAYERS: Lazy<IntCounter> = Lazy::new(|| {
564 12 : register_int_counter!(
565 12 : "pageserver_remote_ondemand_downloaded_layers_total",
566 12 : "Total on-demand downloaded layers"
567 12 : )
568 12 : .unwrap()
569 12 : });
570 :
571 12 : pub(crate) static REMOTE_ONDEMAND_DOWNLOADED_BYTES: Lazy<IntCounter> = Lazy::new(|| {
572 12 : register_int_counter!(
573 12 : "pageserver_remote_ondemand_downloaded_bytes_total",
574 12 : "Total bytes of layers on-demand downloaded",
575 12 : )
576 12 : .unwrap()
577 12 : });
578 :
579 516 : static CURRENT_LOGICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
580 516 : register_uint_gauge_vec!(
581 516 : "pageserver_current_logical_size",
582 516 : "Current logical size grouped by timeline",
583 516 : &["tenant_id", "shard_id", "timeline_id"]
584 516 : )
585 516 : .expect("failed to define current logical size metric")
586 516 : });
587 :
588 516 : static AUX_FILE_SIZE: Lazy<IntGaugeVec> = Lazy::new(|| {
589 516 : register_int_gauge_vec!(
590 516 : "pageserver_aux_file_estimated_size",
591 516 : "The size of all aux files for a timeline in aux file v2 store.",
592 516 : &["tenant_id", "shard_id", "timeline_id"]
593 516 : )
594 516 : .expect("failed to define a metric")
595 516 : });
596 :
597 516 : static VALID_LSN_LEASE_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
598 516 : register_uint_gauge_vec!(
599 516 : "pageserver_valid_lsn_lease_count",
600 516 : "The number of valid leases after refreshing gc info.",
601 516 : &["tenant_id", "shard_id", "timeline_id"],
602 516 : )
603 516 : .expect("failed to define a metric")
604 516 : });
605 :
606 0 : pub(crate) static CIRCUIT_BREAKERS_BROKEN: Lazy<IntCounter> = Lazy::new(|| {
607 0 : register_int_counter!(
608 0 : "pageserver_circuit_breaker_broken",
609 0 : "How many times a circuit breaker has broken"
610 0 : )
611 0 : .expect("failed to define a metric")
612 0 : });
613 :
614 0 : pub(crate) static CIRCUIT_BREAKERS_UNBROKEN: Lazy<IntCounter> = Lazy::new(|| {
615 0 : register_int_counter!(
616 0 : "pageserver_circuit_breaker_unbroken",
617 0 : "How many times a circuit breaker has been un-broken (recovered)"
618 0 : )
619 0 : .expect("failed to define a metric")
620 0 : });
621 :
622 492 : pub(crate) static COMPRESSION_IMAGE_INPUT_BYTES: Lazy<IntCounter> = Lazy::new(|| {
623 492 : register_int_counter!(
624 492 : "pageserver_compression_image_in_bytes_total",
625 492 : "Size of data written into image layers before compression"
626 492 : )
627 492 : .expect("failed to define a metric")
628 492 : });
629 :
630 492 : pub(crate) static COMPRESSION_IMAGE_INPUT_BYTES_CONSIDERED: Lazy<IntCounter> = Lazy::new(|| {
631 492 : register_int_counter!(
632 492 : "pageserver_compression_image_in_bytes_considered",
633 492 : "Size of potentially compressible data written into image layers before compression"
634 492 : )
635 492 : .expect("failed to define a metric")
636 492 : });
637 :
638 492 : pub(crate) static COMPRESSION_IMAGE_INPUT_BYTES_CHOSEN: Lazy<IntCounter> = Lazy::new(|| {
639 492 : register_int_counter!(
640 492 : "pageserver_compression_image_in_bytes_chosen",
641 492 : "Size of data whose compressed form was written into image layers"
642 492 : )
643 492 : .expect("failed to define a metric")
644 492 : });
645 :
646 492 : pub(crate) static COMPRESSION_IMAGE_OUTPUT_BYTES: Lazy<IntCounter> = Lazy::new(|| {
647 492 : register_int_counter!(
648 492 : "pageserver_compression_image_out_bytes_total",
649 492 : "Size of compressed image layer written"
650 492 : )
651 492 : .expect("failed to define a metric")
652 492 : });
653 :
654 : pub(crate) mod initial_logical_size {
655 : use metrics::{register_int_counter, register_int_counter_vec, IntCounter, IntCounterVec};
656 : use once_cell::sync::Lazy;
657 :
658 : pub(crate) struct StartCalculation(IntCounterVec);
659 516 : pub(crate) static START_CALCULATION: Lazy<StartCalculation> = Lazy::new(|| {
660 516 : StartCalculation(
661 516 : register_int_counter_vec!(
662 516 : "pageserver_initial_logical_size_start_calculation",
663 516 : "Incremented each time we start an initial logical size calculation attempt. \
664 516 : The `circumstances` label provides some additional details.",
665 516 : &["attempt", "circumstances"]
666 516 : )
667 516 : .unwrap(),
668 516 : )
669 516 : });
670 :
671 : struct DropCalculation {
672 : first: IntCounter,
673 : retry: IntCounter,
674 : }
675 :
676 516 : static DROP_CALCULATION: Lazy<DropCalculation> = Lazy::new(|| {
677 516 : let vec = register_int_counter_vec!(
678 516 : "pageserver_initial_logical_size_drop_calculation",
679 516 : "Incremented each time we abort a started size calculation attmpt.",
680 516 : &["attempt"]
681 516 : )
682 516 : .unwrap();
683 516 : DropCalculation {
684 516 : first: vec.with_label_values(&["first"]),
685 516 : retry: vec.with_label_values(&["retry"]),
686 516 : }
687 516 : });
688 :
689 : pub(crate) struct Calculated {
690 : pub(crate) births: IntCounter,
691 : pub(crate) deaths: IntCounter,
692 : }
693 :
694 516 : pub(crate) static CALCULATED: Lazy<Calculated> = Lazy::new(|| Calculated {
695 516 : births: register_int_counter!(
696 516 : "pageserver_initial_logical_size_finish_calculation",
697 516 : "Incremented every time we finish calculation of initial logical size.\
698 516 : If everything is working well, this should happen at most once per Timeline object."
699 516 : )
700 516 : .unwrap(),
701 516 : deaths: register_int_counter!(
702 516 : "pageserver_initial_logical_size_drop_finished_calculation",
703 516 : "Incremented when we drop a finished initial logical size calculation result.\
704 516 : Mainly useful to turn pageserver_initial_logical_size_finish_calculation into a gauge."
705 516 : )
706 516 : .unwrap(),
707 516 : });
708 :
709 : pub(crate) struct OngoingCalculationGuard {
710 : inc_drop_calculation: Option<IntCounter>,
711 : }
712 :
713 552 : #[derive(strum_macros::IntoStaticStr)]
714 : pub(crate) enum StartCircumstances {
715 : EmptyInitial,
716 : SkippedConcurrencyLimiter,
717 : AfterBackgroundTasksRateLimit,
718 : }
719 :
720 : impl StartCalculation {
721 552 : pub(crate) fn first(&self, circumstances: StartCircumstances) -> OngoingCalculationGuard {
722 552 : let circumstances_label: &'static str = circumstances.into();
723 552 : self.0
724 552 : .with_label_values(&["first", circumstances_label])
725 552 : .inc();
726 552 : OngoingCalculationGuard {
727 552 : inc_drop_calculation: Some(DROP_CALCULATION.first.clone()),
728 552 : }
729 552 : }
730 0 : pub(crate) fn retry(&self, circumstances: StartCircumstances) -> OngoingCalculationGuard {
731 0 : let circumstances_label: &'static str = circumstances.into();
732 0 : self.0
733 0 : .with_label_values(&["retry", circumstances_label])
734 0 : .inc();
735 0 : OngoingCalculationGuard {
736 0 : inc_drop_calculation: Some(DROP_CALCULATION.retry.clone()),
737 0 : }
738 0 : }
739 : }
740 :
741 : impl Drop for OngoingCalculationGuard {
742 552 : fn drop(&mut self) {
743 552 : if let Some(counter) = self.inc_drop_calculation.take() {
744 0 : counter.inc();
745 552 : }
746 552 : }
747 : }
748 :
749 : impl OngoingCalculationGuard {
750 552 : pub(crate) fn calculation_result_saved(mut self) -> FinishedCalculationGuard {
751 552 : drop(self.inc_drop_calculation.take());
752 552 : CALCULATED.births.inc();
753 552 : FinishedCalculationGuard {
754 552 : inc_on_drop: CALCULATED.deaths.clone(),
755 552 : }
756 552 : }
757 : }
758 :
759 : pub(crate) struct FinishedCalculationGuard {
760 : inc_on_drop: IntCounter,
761 : }
762 :
763 : impl Drop for FinishedCalculationGuard {
764 18 : fn drop(&mut self) {
765 18 : self.inc_on_drop.inc();
766 18 : }
767 : }
768 :
769 : // context: https://github.com/neondatabase/neon/issues/5963
770 : pub(crate) static TIMELINES_WHERE_WALRECEIVER_GOT_APPROXIMATE_SIZE: Lazy<IntCounter> =
771 0 : Lazy::new(|| {
772 0 : register_int_counter!(
773 0 : "pageserver_initial_logical_size_timelines_where_walreceiver_got_approximate_size",
774 0 : "Counter for the following event: walreceiver calls\
775 0 : Timeline::get_current_logical_size() and it returns `Approximate` for the first time."
776 0 : )
777 0 : .unwrap()
778 0 : });
779 : }
780 :
781 0 : static DIRECTORY_ENTRIES_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
782 0 : register_uint_gauge_vec!(
783 0 : "pageserver_directory_entries_count",
784 0 : "Sum of the entries in pageserver-stored directory listings",
785 0 : &["tenant_id", "shard_id", "timeline_id"]
786 0 : )
787 0 : .expect("failed to define a metric")
788 0 : });
789 :
790 522 : pub(crate) static TENANT_STATE_METRIC: Lazy<UIntGaugeVec> = Lazy::new(|| {
791 522 : register_uint_gauge_vec!(
792 522 : "pageserver_tenant_states_count",
793 522 : "Count of tenants per state",
794 522 : &["state"]
795 522 : )
796 522 : .expect("Failed to register pageserver_tenant_states_count metric")
797 522 : });
798 :
799 : /// A set of broken tenants.
800 : ///
801 : /// These are expected to be so rare that a set is fine. Set as in a new timeseries per each broken
802 : /// tenant.
803 35 : pub(crate) static BROKEN_TENANTS_SET: Lazy<UIntGaugeVec> = Lazy::new(|| {
804 35 : register_uint_gauge_vec!(
805 35 : "pageserver_broken_tenants_count",
806 35 : "Set of broken tenants",
807 35 : &["tenant_id", "shard_id"]
808 35 : )
809 35 : .expect("Failed to register pageserver_tenant_states_count metric")
810 35 : });
811 :
812 18 : pub(crate) static TENANT_SYNTHETIC_SIZE_METRIC: Lazy<UIntGaugeVec> = Lazy::new(|| {
813 18 : register_uint_gauge_vec!(
814 18 : "pageserver_tenant_synthetic_cached_size_bytes",
815 18 : "Synthetic size of each tenant in bytes",
816 18 : &["tenant_id"]
817 18 : )
818 18 : .expect("Failed to register pageserver_tenant_synthetic_cached_size_bytes metric")
819 18 : });
820 :
821 0 : pub(crate) static EVICTION_ITERATION_DURATION: Lazy<HistogramVec> = Lazy::new(|| {
822 0 : register_histogram_vec!(
823 0 : "pageserver_eviction_iteration_duration_seconds_global",
824 0 : "Time spent on a single eviction iteration",
825 0 : &["period_secs", "threshold_secs"],
826 0 : STORAGE_OP_BUCKETS.into(),
827 0 : )
828 0 : .expect("failed to define a metric")
829 0 : });
830 :
831 516 : static EVICTIONS: Lazy<IntCounterVec> = Lazy::new(|| {
832 516 : register_int_counter_vec!(
833 516 : "pageserver_evictions",
834 516 : "Number of layers evicted from the pageserver",
835 516 : &["tenant_id", "shard_id", "timeline_id"]
836 516 : )
837 516 : .expect("failed to define a metric")
838 516 : });
839 :
840 516 : static EVICTIONS_WITH_LOW_RESIDENCE_DURATION: Lazy<IntCounterVec> = Lazy::new(|| {
841 516 : register_int_counter_vec!(
842 516 : "pageserver_evictions_with_low_residence_duration",
843 516 : "If a layer is evicted that was resident for less than `low_threshold`, it is counted to this counter. \
844 516 : Residence duration is determined using the `residence_duration_data_source`.",
845 516 : &["tenant_id", "shard_id", "timeline_id", "residence_duration_data_source", "low_threshold_secs"]
846 516 : )
847 516 : .expect("failed to define a metric")
848 516 : });
849 :
850 0 : pub(crate) static UNEXPECTED_ONDEMAND_DOWNLOADS: Lazy<IntCounter> = Lazy::new(|| {
851 0 : register_int_counter!(
852 0 : "pageserver_unexpected_ondemand_downloads_count",
853 0 : "Number of unexpected on-demand downloads. \
854 0 : We log more context for each increment, so we forgo any labels on this metric.",
855 0 : )
856 0 : .expect("failed to define a metric")
857 0 : });
858 :
859 : /// How long did we take to start up? Broken down by labels to describe
860 : /// different phases of startup.
861 0 : pub static STARTUP_DURATION: Lazy<GaugeVec> = Lazy::new(|| {
862 0 : register_gauge_vec!(
863 0 : "pageserver_startup_duration_seconds",
864 0 : "Time taken by phases of pageserver startup, in seconds",
865 0 : &["phase"]
866 0 : )
867 0 : .expect("Failed to register pageserver_startup_duration_seconds metric")
868 0 : });
869 :
870 0 : pub static STARTUP_IS_LOADING: Lazy<UIntGauge> = Lazy::new(|| {
871 0 : register_uint_gauge!(
872 0 : "pageserver_startup_is_loading",
873 0 : "1 while in initial startup load of tenants, 0 at other times"
874 0 : )
875 0 : .expect("Failed to register pageserver_startup_is_loading")
876 0 : });
877 :
878 504 : pub(crate) static TIMELINE_EPHEMERAL_BYTES: Lazy<UIntGauge> = Lazy::new(|| {
879 504 : register_uint_gauge!(
880 504 : "pageserver_timeline_ephemeral_bytes",
881 504 : "Total number of bytes in ephemeral layers, summed for all timelines. Approximate, lazily updated."
882 504 : )
883 504 : .expect("Failed to register metric")
884 504 : });
885 :
886 : /// Metrics related to the lifecycle of a [`crate::tenant::Tenant`] object: things
887 : /// like how long it took to load.
888 : ///
889 : /// Note that these are process-global metrics, _not_ per-tenant metrics. Per-tenant
890 : /// metrics are rather expensive, and usually fine grained stuff makes more sense
891 : /// at a timeline level than tenant level.
892 : pub(crate) struct TenantMetrics {
893 : /// How long did tenants take to go from construction to active state?
894 : pub(crate) activation: Histogram,
895 : pub(crate) preload: Histogram,
896 : pub(crate) attach: Histogram,
897 :
898 : /// How many tenants are included in the initial startup of the pageserver?
899 : pub(crate) startup_scheduled: IntCounter,
900 : pub(crate) startup_complete: IntCounter,
901 : }
902 :
903 0 : pub(crate) static TENANT: Lazy<TenantMetrics> = Lazy::new(|| {
904 0 : TenantMetrics {
905 0 : activation: register_histogram!(
906 0 : "pageserver_tenant_activation_seconds",
907 0 : "Time taken by tenants to activate, in seconds",
908 0 : CRITICAL_OP_BUCKETS.into()
909 0 : )
910 0 : .expect("Failed to register metric"),
911 0 : preload: register_histogram!(
912 0 : "pageserver_tenant_preload_seconds",
913 0 : "Time taken by tenants to load remote metadata on startup/attach, in seconds",
914 0 : CRITICAL_OP_BUCKETS.into()
915 0 : )
916 0 : .expect("Failed to register metric"),
917 0 : attach: register_histogram!(
918 0 : "pageserver_tenant_attach_seconds",
919 0 : "Time taken by tenants to intialize, after remote metadata is already loaded",
920 0 : CRITICAL_OP_BUCKETS.into()
921 0 : )
922 0 : .expect("Failed to register metric"),
923 0 : startup_scheduled: register_int_counter!(
924 0 : "pageserver_tenant_startup_scheduled",
925 0 : "Number of tenants included in pageserver startup (doesn't count tenants attached later)"
926 0 : ).expect("Failed to register metric"),
927 0 : startup_complete: register_int_counter!(
928 0 : "pageserver_tenant_startup_complete",
929 0 : "Number of tenants that have completed warm-up, or activated on-demand during initial startup: \
930 0 : should eventually reach `pageserver_tenant_startup_scheduled_total`. Does not include broken \
931 0 : tenants: such cases will lead to this metric never reaching the scheduled count."
932 0 : ).expect("Failed to register metric"),
933 0 : }
934 0 : });
935 :
936 : /// Each `Timeline`'s [`EVICTIONS_WITH_LOW_RESIDENCE_DURATION`] metric.
937 : #[derive(Debug)]
938 : pub(crate) struct EvictionsWithLowResidenceDuration {
939 : data_source: &'static str,
940 : threshold: Duration,
941 : counter: Option<IntCounter>,
942 : }
943 :
944 : pub(crate) struct EvictionsWithLowResidenceDurationBuilder {
945 : data_source: &'static str,
946 : threshold: Duration,
947 : }
948 :
949 : impl EvictionsWithLowResidenceDurationBuilder {
950 1248 : pub fn new(data_source: &'static str, threshold: Duration) -> Self {
951 1248 : Self {
952 1248 : data_source,
953 1248 : threshold,
954 1248 : }
955 1248 : }
956 :
957 1248 : fn build(
958 1248 : &self,
959 1248 : tenant_id: &str,
960 1248 : shard_id: &str,
961 1248 : timeline_id: &str,
962 1248 : ) -> EvictionsWithLowResidenceDuration {
963 1248 : let counter = EVICTIONS_WITH_LOW_RESIDENCE_DURATION
964 1248 : .get_metric_with_label_values(&[
965 1248 : tenant_id,
966 1248 : shard_id,
967 1248 : timeline_id,
968 1248 : self.data_source,
969 1248 : &EvictionsWithLowResidenceDuration::threshold_label_value(self.threshold),
970 1248 : ])
971 1248 : .unwrap();
972 1248 : EvictionsWithLowResidenceDuration {
973 1248 : data_source: self.data_source,
974 1248 : threshold: self.threshold,
975 1248 : counter: Some(counter),
976 1248 : }
977 1248 : }
978 : }
979 :
980 : impl EvictionsWithLowResidenceDuration {
981 1272 : fn threshold_label_value(threshold: Duration) -> String {
982 1272 : format!("{}", threshold.as_secs())
983 1272 : }
984 :
985 12 : pub fn observe(&self, observed_value: Duration) {
986 12 : if observed_value < self.threshold {
987 12 : self.counter
988 12 : .as_ref()
989 12 : .expect("nobody calls this function after `remove_from_vec`")
990 12 : .inc();
991 12 : }
992 12 : }
993 :
994 24 : pub fn change_threshold(
995 24 : &mut self,
996 24 : tenant_id: &str,
997 24 : shard_id: &str,
998 24 : timeline_id: &str,
999 24 : new_threshold: Duration,
1000 24 : ) {
1001 24 : if new_threshold == self.threshold {
1002 24 : return;
1003 0 : }
1004 0 : let mut with_new = EvictionsWithLowResidenceDurationBuilder::new(
1005 0 : self.data_source,
1006 0 : new_threshold,
1007 0 : )
1008 0 : .build(tenant_id, shard_id, timeline_id);
1009 0 : std::mem::swap(self, &mut with_new);
1010 0 : with_new.remove(tenant_id, shard_id, timeline_id);
1011 24 : }
1012 :
1013 : // This could be a `Drop` impl, but we need the `tenant_id` and `timeline_id`.
1014 24 : fn remove(&mut self, tenant_id: &str, shard_id: &str, timeline_id: &str) {
1015 24 : let Some(_counter) = self.counter.take() else {
1016 0 : return;
1017 : };
1018 :
1019 24 : let threshold = Self::threshold_label_value(self.threshold);
1020 24 :
1021 24 : let removed = EVICTIONS_WITH_LOW_RESIDENCE_DURATION.remove_label_values(&[
1022 24 : tenant_id,
1023 24 : shard_id,
1024 24 : timeline_id,
1025 24 : self.data_source,
1026 24 : &threshold,
1027 24 : ]);
1028 24 :
1029 24 : match removed {
1030 0 : Err(e) => {
1031 0 : // this has been hit in staging as
1032 0 : // <https://neondatabase.sentry.io/issues/4142396994/>, but we don't know how.
1033 0 : // because we can be in the drop path already, don't risk:
1034 0 : // - "double-panic => illegal instruction" or
1035 0 : // - future "drop panick => abort"
1036 0 : //
1037 0 : // so just nag: (the error has the labels)
1038 0 : tracing::warn!("failed to remove EvictionsWithLowResidenceDuration, it was already removed? {e:#?}");
1039 : }
1040 : Ok(()) => {
1041 : // to help identify cases where we double-remove the same values, let's log all
1042 : // deletions?
1043 24 : tracing::info!("removed EvictionsWithLowResidenceDuration with {tenant_id}, {timeline_id}, {}, {threshold}", self.data_source);
1044 : }
1045 : }
1046 24 : }
1047 : }
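// Illustrative wiring sketch (hypothetical; the "residence_source" name, label values,
// and durations are placeholders chosen for the example):
//
//   let builder = EvictionsWithLowResidenceDurationBuilder::new("residence_source", Duration::from_secs(600));
//   let low_residence = builder.build(&tenant_id, &shard_id, &timeline_id);
//   // On eviction of a layer:
//   low_residence.observe(layer_residence_duration);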
1048 :
1049 : // Metrics collected on disk IO operations
1050 : //
1051 : // Roughly logarithmic scale.
1052 : const STORAGE_IO_TIME_BUCKETS: &[f64] = &[
1053 : 0.000030, // 30 usec
1054 : 0.001000, // 1000 usec
1055 : 0.030, // 30 ms
1056 : 1.000, // 1000 ms
1057 : 30.000, // 30000 ms
1058 : ];
1059 :
1060 : /// VirtualFile fs operation variants.
1061 : ///
1062 : /// Operations:
1063 : /// - open ([`std::fs::OpenOptions::open`])
1064 : /// - close (dropping [`crate::virtual_file::VirtualFile`])
1065 : /// - close-by-replace (close by replacement algorithm)
1066 : /// - read (`read_at`)
1067 : /// - write (`write_at`)
1068 : /// - seek (modify internal position or file length query)
1069 : /// - fsync ([`std::fs::File::sync_all`])
1070 : /// - metadata ([`std::fs::File::metadata`])
1071 : #[derive(
1072 5454 : Debug, Clone, Copy, strum_macros::EnumCount, strum_macros::EnumIter, strum_macros::FromRepr,
1073 : )]
1074 : pub(crate) enum StorageIoOperation {
1075 : Open,
1076 : OpenAfterReplace,
1077 : Close,
1078 : CloseByReplace,
1079 : Read,
1080 : Write,
1081 : Seek,
1082 : Fsync,
1083 : Metadata,
1084 : }
1085 :
1086 : impl StorageIoOperation {
1087 5454 : pub fn as_str(&self) -> &'static str {
1088 5454 : match self {
1089 606 : StorageIoOperation::Open => "open",
1090 606 : StorageIoOperation::OpenAfterReplace => "open-after-replace",
1091 606 : StorageIoOperation::Close => "close",
1092 606 : StorageIoOperation::CloseByReplace => "close-by-replace",
1093 606 : StorageIoOperation::Read => "read",
1094 606 : StorageIoOperation::Write => "write",
1095 606 : StorageIoOperation::Seek => "seek",
1096 606 : StorageIoOperation::Fsync => "fsync",
1097 606 : StorageIoOperation::Metadata => "metadata",
1098 : }
1099 5454 : }
1100 : }
1101 :
1102 : /// Tracks time taken by fs operations near VirtualFile.
1103 : #[derive(Debug)]
1104 : pub(crate) struct StorageIoTime {
1105 : metrics: [Histogram; StorageIoOperation::COUNT],
1106 : }
1107 :
1108 : impl StorageIoTime {
1109 606 : fn new() -> Self {
1110 606 : let storage_io_histogram_vec = register_histogram_vec!(
1111 606 : "pageserver_io_operations_seconds",
1112 606 : "Time spent in IO operations",
1113 606 : &["operation"],
1114 606 : STORAGE_IO_TIME_BUCKETS.into()
1115 606 : )
1116 606 : .expect("failed to define a metric");
1117 5454 : let metrics = std::array::from_fn(|i| {
1118 5454 : let op = StorageIoOperation::from_repr(i).unwrap();
1119 5454 : storage_io_histogram_vec
1120 5454 : .get_metric_with_label_values(&[op.as_str()])
1121 5454 : .unwrap()
1122 5454 : });
1123 606 : Self { metrics }
1124 606 : }
1125 :
1126 6871773 : pub(crate) fn get(&self, op: StorageIoOperation) -> &Histogram {
1127 6871773 : &self.metrics[op as usize]
1128 6871773 : }
1129 : }
1130 :
1131 : pub(crate) static STORAGE_IO_TIME_METRIC: Lazy<StorageIoTime> = Lazy::new(StorageIoTime::new);
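// Illustrative call-site sketch (hypothetical; `elapsed` is assumed to be measured around
// the underlying filesystem call):
//
//   STORAGE_IO_TIME_METRIC
//       .get(StorageIoOperation::Write)
//       .observe(elapsed.as_secs_f64());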
1132 :
1133 : const STORAGE_IO_SIZE_OPERATIONS: &[&str] = &["read", "write"];
1134 :
1135 : // Needed for the https://neonprod.grafana.net/d/5uK9tHL4k/picking-tenant-for-relocation?orgId=1
1136 594 : pub(crate) static STORAGE_IO_SIZE: Lazy<IntGaugeVec> = Lazy::new(|| {
1137 594 : register_int_gauge_vec!(
1138 594 : "pageserver_io_operations_bytes_total",
1139 594 : "Total amount of bytes read/written in IO operations",
1140 594 : &["operation", "tenant_id", "shard_id", "timeline_id"]
1141 594 : )
1142 594 : .expect("failed to define a metric")
1143 594 : });
1144 :
1145 : #[cfg(not(test))]
1146 : pub(crate) mod virtual_file_descriptor_cache {
1147 : use super::*;
1148 :
1149 0 : pub(crate) static SIZE_MAX: Lazy<UIntGauge> = Lazy::new(|| {
1150 0 : register_uint_gauge!(
1151 0 : "pageserver_virtual_file_descriptor_cache_size_max",
1152 0 : "Maximum number of open file descriptors in the cache."
1153 0 : )
1154 0 : .unwrap()
1155 0 : });
1156 :
1157 : // SIZE_CURRENT: derive it like so:
1158 : // ```
1159 : // sum(pageserver_io_operations_seconds_count{operation=~"^(open|open-after-replace)$"})
1160 : // -ignoring(operation)
1161 : // sum(pageserver_io_operations_seconds_count{operation=~"^(close|close-by-replace)$"})
1162 : // ```
1163 : }
1164 :
1165 : #[cfg(not(test))]
1166 : pub(crate) mod virtual_file_io_engine {
1167 : use super::*;
1168 :
1169 0 : pub(crate) static KIND: Lazy<UIntGaugeVec> = Lazy::new(|| {
1170 0 : register_uint_gauge_vec!(
1171 0 : "pageserver_virtual_file_io_engine_kind",
1172 0 : "The configured io engine for VirtualFile",
1173 0 : &["kind"],
1174 0 : )
1175 0 : .unwrap()
1176 0 : });
1177 : }
1178 :
1179 : struct GlobalAndPerTimelineHistogramTimer<'a, 'c> {
1180 : global_latency_histo: &'a Histogram,
1181 :
1182 : // Optional because not all op types are tracked per-timeline
1183 : per_timeline_latency_histo: Option<&'a Histogram>,
1184 :
1185 : ctx: &'c RequestContext,
1186 : start: std::time::Instant,
1187 : op: SmgrQueryType,
1188 : }
1189 :
1190 : impl<'a, 'c> Drop for GlobalAndPerTimelineHistogramTimer<'a, 'c> {
1191 30 : fn drop(&mut self) {
1192 30 : let elapsed = self.start.elapsed();
1193 30 : let ex_throttled = self
1194 30 : .ctx
1195 30 : .micros_spent_throttled
1196 30 : .close_and_checked_sub_from(elapsed);
1197 30 : let ex_throttled = match ex_throttled {
1198 30 : Ok(res) => res,
1199 0 : Err(error) => {
1200 : use utils::rate_limit::RateLimit;
1201 : static LOGGED: Lazy<Mutex<enum_map::EnumMap<SmgrQueryType, RateLimit>>> =
1202 0 : Lazy::new(|| {
1203 0 : Mutex::new(enum_map::EnumMap::from_array(std::array::from_fn(|_| {
1204 0 : RateLimit::new(Duration::from_secs(10))
1205 0 : })))
1206 0 : });
1207 0 : let mut guard = LOGGED.lock().unwrap();
1208 0 : let rate_limit = &mut guard[self.op];
1209 0 : rate_limit.call(|| {
1210 0 : warn!(op=?self.op, error, "error deducting time spent throttled; this message is logged at a global rate limit");
1211 0 : });
1212 0 : elapsed
1213 : }
1214 : };
1215 30 : self.global_latency_histo
1216 30 : .observe(ex_throttled.as_secs_f64());
1217 30 : if let Some(per_timeline_getpage_histo) = self.per_timeline_latency_histo {
1218 6 : per_timeline_getpage_histo.observe(ex_throttled.as_secs_f64());
1219 24 : }
1220 30 : }
1221 : }
1222 :
1223 : #[derive(
1224 : Debug,
1225 : Clone,
1226 : Copy,
1227 15414 : IntoStaticStr,
1228 : strum_macros::EnumCount,
1229 72 : strum_macros::EnumIter,
1230 12780 : strum_macros::FromRepr,
1231 : enum_map::Enum,
1232 : )]
1233 : #[strum(serialize_all = "snake_case")]
1234 : pub enum SmgrQueryType {
1235 : GetRelExists,
1236 : GetRelSize,
1237 : GetPageAtLsn,
1238 : GetDbSize,
1239 : GetSlruSegment,
1240 : }
1241 :
1242 : #[derive(Debug)]
1243 : pub(crate) struct SmgrQueryTimePerTimeline {
1244 : global_started: [IntCounter; SmgrQueryType::COUNT],
1245 : global_latency: [Histogram; SmgrQueryType::COUNT],
1246 : per_timeline_getpage_started: IntCounter,
1247 : per_timeline_getpage_latency: Histogram,
1248 : }
1249 :
1250 522 : static SMGR_QUERY_STARTED_GLOBAL: Lazy<IntCounterVec> = Lazy::new(|| {
1251 522 : register_int_counter_vec!(
1252 522 : // it's a counter, but, name is prepared to extend it to a histogram of queue depth
1253 522 : "pageserver_smgr_query_started_global_count",
1254 522 : "Number of smgr queries started, aggregated by query type.",
1255 522 : &["smgr_query_type"],
1256 522 : )
1257 522 : .expect("failed to define a metric")
1258 522 : });
1259 :
1260 522 : static SMGR_QUERY_STARTED_PER_TENANT_TIMELINE: Lazy<IntCounterVec> = Lazy::new(|| {
1261 522 : register_int_counter_vec!(
1262 522 : // it's a counter, but, name is prepared to extend it to a histogram of queue depth
1263 522 : "pageserver_smgr_query_started_count",
1264 522 : "Number of smgr queries started, aggregated by query type and tenant/timeline.",
1265 522 : &["smgr_query_type", "tenant_id", "shard_id", "timeline_id"],
1266 522 : )
1267 522 : .expect("failed to define a metric")
1268 522 : });
1269 :
1270 522 : static SMGR_QUERY_TIME_PER_TENANT_TIMELINE: Lazy<HistogramVec> = Lazy::new(|| {
1271 522 : register_histogram_vec!(
1272 522 : "pageserver_smgr_query_seconds",
1273 522 : "Time spent on smgr query handling, aggegated by query type and tenant/timeline.",
1274 522 : &["smgr_query_type", "tenant_id", "shard_id", "timeline_id"],
1275 522 : CRITICAL_OP_BUCKETS.into(),
1276 522 : )
1277 522 : .expect("failed to define a metric")
1278 522 : });
1279 :
1280 522 : static SMGR_QUERY_TIME_GLOBAL_BUCKETS: Lazy<Vec<f64>> = Lazy::new(|| {
1281 522 : [
1282 522 : 1,
1283 522 : 10,
1284 522 : 20,
1285 522 : 40,
1286 522 : 60,
1287 522 : 80,
1288 522 : 100,
1289 522 : 200,
1290 522 : 300,
1291 522 : 400,
1292 522 : 500,
1293 522 : 600,
1294 522 : 700,
1295 522 : 800,
1296 522 : 900,
1297 522 : 1_000, // 1ms
1298 522 : 2_000,
1299 522 : 4_000,
1300 522 : 6_000,
1301 522 : 8_000,
1302 522 : 10_000, // 10ms
1303 522 : 20_000,
1304 522 : 40_000,
1305 522 : 60_000,
1306 522 : 80_000,
1307 522 : 100_000,
1308 522 : 200_000,
1309 522 : 400_000,
1310 522 : 600_000,
1311 522 : 800_000,
1312 522 : 1_000_000, // 1s
1313 522 : 2_000_000,
1314 522 : 4_000_000,
1315 522 : 6_000_000,
1316 522 : 8_000_000,
1317 522 : 10_000_000, // 10s
1318 522 : 20_000_000,
1319 522 : 50_000_000,
1320 522 : 100_000_000,
1321 522 : 200_000_000,
1322 522 : 1_000_000_000, // 1000s
1323 522 : ]
1324 522 : .into_iter()
1325 522 : .map(Duration::from_micros)
1326 21402 : .map(|d| d.as_secs_f64())
1327 522 : .collect()
1328 522 : });
1329 :
1330 522 : static SMGR_QUERY_TIME_GLOBAL: Lazy<HistogramVec> = Lazy::new(|| {
1331 522 : register_histogram_vec!(
1332 522 : "pageserver_smgr_query_seconds_global",
1333 522 : "Time spent on smgr query handling, aggregated by query type.",
1334 522 : &["smgr_query_type"],
1335 522 : SMGR_QUERY_TIME_GLOBAL_BUCKETS.clone(),
1336 522 : )
1337 522 : .expect("failed to define a metric")
1338 522 : });
1339 :
1340 : impl SmgrQueryTimePerTimeline {
1341 1278 : pub(crate) fn new(tenant_shard_id: &TenantShardId, timeline_id: &TimelineId) -> Self {
1342 1278 : let tenant_id = tenant_shard_id.tenant_id.to_string();
1343 1278 : let shard_slug = format!("{}", tenant_shard_id.shard_slug());
1344 1278 : let timeline_id = timeline_id.to_string();
1345 6390 : let global_started = std::array::from_fn(|i| {
1346 6390 : let op = SmgrQueryType::from_repr(i).unwrap();
1347 6390 : SMGR_QUERY_STARTED_GLOBAL
1348 6390 : .get_metric_with_label_values(&[op.into()])
1349 6390 : .unwrap()
1350 6390 : });
1351 6390 : let global_latency = std::array::from_fn(|i| {
1352 6390 : let op = SmgrQueryType::from_repr(i).unwrap();
1353 6390 : SMGR_QUERY_TIME_GLOBAL
1354 6390 : .get_metric_with_label_values(&[op.into()])
1355 6390 : .unwrap()
1356 6390 : });
1357 1278 :
1358 1278 : let per_timeline_getpage_started = SMGR_QUERY_STARTED_PER_TENANT_TIMELINE
1359 1278 : .get_metric_with_label_values(&[
1360 1278 : SmgrQueryType::GetPageAtLsn.into(),
1361 1278 : &tenant_id,
1362 1278 : &shard_slug,
1363 1278 : &timeline_id,
1364 1278 : ])
1365 1278 : .unwrap();
1366 1278 : let per_timeline_getpage_latency = SMGR_QUERY_TIME_PER_TENANT_TIMELINE
1367 1278 : .get_metric_with_label_values(&[
1368 1278 : SmgrQueryType::GetPageAtLsn.into(),
1369 1278 : &tenant_id,
1370 1278 : &shard_slug,
1371 1278 : &timeline_id,
1372 1278 : ])
1373 1278 : .unwrap();
1374 1278 :
1375 1278 : Self {
1376 1278 : global_started,
1377 1278 : global_latency,
1378 1278 : per_timeline_getpage_latency,
1379 1278 : per_timeline_getpage_started,
1380 1278 : }
1381 1278 : }
1382 30 : pub(crate) fn start_timer<'c: 'a, 'a>(
1383 30 : &'a self,
1384 30 : op: SmgrQueryType,
1385 30 : ctx: &'c RequestContext,
1386 30 : ) -> Option<impl Drop + 'a> {
1387 30 : let start = Instant::now();
1388 30 :
1389 30 : self.global_started[op as usize].inc();
1390 30 :
1391 30 : // We subtract time spent throttled from the observed latency.
1392 30 : match ctx.micros_spent_throttled.open() {
1393 30 : Ok(()) => (),
1394 0 : Err(error) => {
1395 0 : use utils::rate_limit::RateLimit;
1396 0 : static LOGGED: Lazy<Mutex<enum_map::EnumMap<SmgrQueryType, RateLimit>>> =
1397 0 : Lazy::new(|| {
1398 0 : Mutex::new(enum_map::EnumMap::from_array(std::array::from_fn(|_| {
1399 0 : RateLimit::new(Duration::from_secs(10))
1400 0 : })))
1401 0 : });
1402 0 : let mut guard = LOGGED.lock().unwrap();
1403 0 : let rate_limit = &mut guard[op];
1404 0 : rate_limit.call(|| {
1405 0 : warn!(?op, error, "error opening micros_spent_throttled; this message is logged at a global rate limit");
1406 0 : });
1407 0 : }
1408 : }
1409 :
1410 30 : let per_timeline_latency_histo = if matches!(op, SmgrQueryType::GetPageAtLsn) {
1411 6 : self.per_timeline_getpage_started.inc();
1412 6 : Some(&self.per_timeline_getpage_latency)
1413 : } else {
1414 24 : None
1415 : };
1416 :
1417 30 : Some(GlobalAndPerTimelineHistogramTimer {
1418 30 : global_latency_histo: &self.global_latency[op as usize],
1419 30 : per_timeline_latency_histo,
1420 30 : ctx,
1421 30 : start,
1422 30 : op,
1423 30 : })
1424 30 : }
1425 : }
1426 :
1427 : #[cfg(test)]
1428 : mod smgr_query_time_tests {
1429 : use pageserver_api::shard::TenantShardId;
1430 : use strum::IntoEnumIterator;
1431 : use utils::id::{TenantId, TimelineId};
1432 :
1433 : use crate::{
1434 : context::{DownloadBehavior, RequestContext},
1435 : task_mgr::TaskKind,
1436 : };
1437 :
1438 : // Regression test, we used hard-coded string constants before using an enum.
1439 : #[test]
1440 6 : fn op_label_name() {
1441 : use super::SmgrQueryType::*;
1442 6 : let expect: [(super::SmgrQueryType, &'static str); 5] = [
1443 6 : (GetRelExists, "get_rel_exists"),
1444 6 : (GetRelSize, "get_rel_size"),
1445 6 : (GetPageAtLsn, "get_page_at_lsn"),
1446 6 : (GetDbSize, "get_db_size"),
1447 6 : (GetSlruSegment, "get_slru_segment"),
1448 6 : ];
1449 36 : for (op, expect) in expect {
1450 30 : let actual: &'static str = op.into();
1451 30 : assert_eq!(actual, expect);
1452 : }
1453 6 : }
1454 :
1455 : #[test]
1456 6 : fn basic() {
1457 6 : let ops: Vec<_> = super::SmgrQueryType::iter().collect();
1458 :
1459 36 : for op in &ops {
1460 30 : let tenant_id = TenantId::generate();
1461 30 : let timeline_id = TimelineId::generate();
1462 30 : let metrics = super::SmgrQueryTimePerTimeline::new(
1463 30 : &TenantShardId::unsharded(tenant_id),
1464 30 : &timeline_id,
1465 30 : );
1466 30 :
1467 60 : let get_counts = || {
1468 60 : let global: u64 = ops
1469 60 : .iter()
1470 300 : .map(|op| metrics.global_latency[*op as usize].get_sample_count())
1471 60 : .sum();
1472 60 : (
1473 60 : global,
1474 60 : metrics.per_timeline_getpage_latency.get_sample_count(),
1475 60 : )
1476 60 : };
1477 :
1478 30 : let (pre_global, pre_per_tenant_timeline) = get_counts();
1479 30 : assert_eq!(pre_per_tenant_timeline, 0);
1480 :
1481 30 : let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Download);
1482 30 : let timer = metrics.start_timer(*op, &ctx);
1483 30 : drop(timer);
1484 30 :
1485 30 : let (post_global, post_per_tenant_timeline) = get_counts();
1486 30 : if matches!(op, super::SmgrQueryType::GetPageAtLsn) {
1487 : // getpage ops are tracked per-timeline, others aren't
1488 6 : assert_eq!(post_per_tenant_timeline, 1);
1489 : } else {
1490 24 : assert_eq!(post_per_tenant_timeline, 0);
1491 : }
1492 30 : assert!(post_global > pre_global);
1493 : }
1494 6 : }
1495 : }
1496 :
1497 : // keep in sync with control plane Go code so that we can validate
1498 : // compute's basebackup_ms metric with our perspective in the context of SLI/SLO.
1499 0 : static COMPUTE_STARTUP_BUCKETS: Lazy<[f64; 28]> = Lazy::new(|| {
1500 0 : // Go code uses milliseconds. Variable is called `computeStartupBuckets`
1501 0 : [
1502 0 : 5, 10, 20, 30, 50, 70, 100, 120, 150, 200, 250, 300, 350, 400, 450, 500, 600, 800, 1000,
1503 0 : 1500, 2000, 2500, 3000, 5000, 10000, 20000, 40000, 60000,
1504 0 : ]
1505 0 : .map(|ms| (ms as f64) / 1000.0)
1506 0 : });
1507 :
1508 : pub(crate) struct BasebackupQueryTime {
1509 : ok: Histogram,
1510 : error: Histogram,
1511 : }
1512 :
1513 0 : pub(crate) static BASEBACKUP_QUERY_TIME: Lazy<BasebackupQueryTime> = Lazy::new(|| {
1514 0 : let vec = register_histogram_vec!(
1515 0 : "pageserver_basebackup_query_seconds",
1516 0 : "Histogram of basebackup queries durations, by result type",
1517 0 : &["result"],
1518 0 : COMPUTE_STARTUP_BUCKETS.to_vec(),
1519 0 : )
1520 0 : .expect("failed to define a metric");
1521 0 : BasebackupQueryTime {
1522 0 : ok: vec.get_metric_with_label_values(&["ok"]).unwrap(),
1523 0 : error: vec.get_metric_with_label_values(&["error"]).unwrap(),
1524 0 : }
1525 0 : });
1526 :
1527 : pub(crate) struct BasebackupQueryTimeOngoingRecording<'a, 'c> {
1528 : parent: &'a BasebackupQueryTime,
1529 : ctx: &'c RequestContext,
1530 : start: std::time::Instant,
1531 : }
1532 :
1533 : impl BasebackupQueryTime {
1534 0 : pub(crate) fn start_recording<'c: 'a, 'a>(
1535 0 : &'a self,
1536 0 : ctx: &'c RequestContext,
1537 0 : ) -> BasebackupQueryTimeOngoingRecording<'a, 'a> {
1538 0 : let start = Instant::now();
1539 0 : match ctx.micros_spent_throttled.open() {
1540 0 : Ok(()) => (),
1541 0 : Err(error) => {
1542 0 : use utils::rate_limit::RateLimit;
1543 0 : static LOGGED: Lazy<Mutex<RateLimit>> =
1544 0 : Lazy::new(|| Mutex::new(RateLimit::new(Duration::from_secs(10))));
1545 0 : let mut rate_limit = LOGGED.lock().unwrap();
1546 0 : rate_limit.call(|| {
1547 0 : warn!(error, "error opening micros_spent_throttled; this message is logged at a global rate limit");
1548 0 : });
1549 0 : }
1550 : }
1551 0 : BasebackupQueryTimeOngoingRecording {
1552 0 : parent: self,
1553 0 : ctx,
1554 0 : start,
1555 0 : }
1556 0 : }
1557 : }
1558 :
1559 : impl<'a, 'c> BasebackupQueryTimeOngoingRecording<'a, 'c> {
1560 0 : pub(crate) fn observe<T, E>(self, res: &Result<T, E>) {
1561 0 : let elapsed = self.start.elapsed();
1562 0 : let ex_throttled = self
1563 0 : .ctx
1564 0 : .micros_spent_throttled
1565 0 : .close_and_checked_sub_from(elapsed);
1566 0 : let ex_throttled = match ex_throttled {
1567 0 : Ok(ex_throttled) => ex_throttled,
1568 0 : Err(error) => {
1569 : use utils::rate_limit::RateLimit;
1570 : static LOGGED: Lazy<Mutex<RateLimit>> =
1571 0 : Lazy::new(|| Mutex::new(RateLimit::new(Duration::from_secs(10))));
1572 0 : let mut rate_limit = LOGGED.lock().unwrap();
1573 0 : rate_limit.call(|| {
1574 0 : warn!(error, "error deducting time spent throttled; this message is logged at a global rate limit");
1575 0 : });
1576 0 : elapsed
1577 : }
1578 : };
1579 0 : let metric = if res.is_ok() {
1580 0 : &self.parent.ok
1581 : } else {
1582 0 : &self.parent.error
1583 : };
1584 0 : metric.observe(ex_throttled.as_secs_f64());
1585 0 : }
1586 : }
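// Usage sketch (illustrative only, not part of this file; assumes it runs inside this
// crate with a RequestContext supplied by the caller): start_recording() opens the
// throttled-time accounting and observe() records elapsed time minus throttling into
// the "ok" or "error" histogram.
async fn record_basebackup_duration_sketch<T, E>(
    ctx: &RequestContext,
    work: impl std::future::Future<Output = Result<T, E>>,
) -> Result<T, E> {
    let recording = BASEBACKUP_QUERY_TIME.start_recording(ctx);
    let res = work.await;
    recording.observe(&res);
    res
}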
1587 :
1588 0 : pub(crate) static LIVE_CONNECTIONS: Lazy<IntCounterPairVec> = Lazy::new(|| {
1589 0 : register_int_counter_pair_vec!(
1590 0 : "pageserver_live_connections_started",
1591 0 : "Number of network connections that we started handling",
1592 0 : "pageserver_live_connections_finished",
1593 0 : "Number of network connections that we finished handling",
1594 0 : &["pageserver_connection_kind"]
1595 0 : )
1596 0 : .expect("failed to define a metric")
1597 0 : });
1598 :
1599 0 : #[derive(Clone, Copy, enum_map::Enum, IntoStaticStr)]
1600 : pub(crate) enum ComputeCommandKind {
1601 : PageStreamV2,
1602 : Basebackup,
1603 : Fullbackup,
1604 : LeaseLsn,
1605 : }
1606 :
1607 : pub(crate) struct ComputeCommandCounters {
1608 : map: EnumMap<ComputeCommandKind, IntCounter>,
1609 : }
1610 :
1611 0 : pub(crate) static COMPUTE_COMMANDS_COUNTERS: Lazy<ComputeCommandCounters> = Lazy::new(|| {
1612 0 : let inner = register_int_counter_vec!(
1613 0 : "pageserver_compute_commands",
1614 0 : "Number of compute -> pageserver commands processed",
1615 0 : &["command"]
1616 0 : )
1617 0 : .expect("failed to define a metric");
1618 0 :
1619 0 : ComputeCommandCounters {
1620 0 : map: EnumMap::from_array(std::array::from_fn(|i| {
1621 0 : let command = <ComputeCommandKind as enum_map::Enum>::from_usize(i);
1622 0 : let command_str: &'static str = command.into();
1623 0 : inner.with_label_values(&[command_str])
1624 0 : })),
1625 0 : }
1626 0 : });
1627 :
1628 : impl ComputeCommandCounters {
1629 0 : pub(crate) fn for_command(&self, command: ComputeCommandKind) -> &IntCounter {
1630 0 : &self.map[command]
1631 0 : }
1632 : }
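// Usage sketch (illustrative only, not part of this file): the command dispatcher is
// expected to bump the per-command counter once per processed compute command.
fn count_compute_command_sketch(kind: ComputeCommandKind) {
    COMPUTE_COMMANDS_COUNTERS.for_command(kind).inc();
}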
1633 :
1634 : // remote storage metrics
1635 :
1636 510 : static REMOTE_TIMELINE_CLIENT_CALLS: Lazy<IntCounterPairVec> = Lazy::new(|| {
1637 510 : register_int_counter_pair_vec!(
1638 510 : "pageserver_remote_timeline_client_calls_started",
1639 510 : "Number of started calls to remote timeline client.",
1640 510 : "pageserver_remote_timeline_client_calls_finished",
1641 510 : "Number of finshed calls to remote timeline client.",
1642 510 : &[
1643 510 : "tenant_id",
1644 510 : "shard_id",
1645 510 : "timeline_id",
1646 510 : "file_kind",
1647 510 : "op_kind"
1648 510 : ],
1649 510 : )
1650 510 : .unwrap()
1651 510 : });
1652 :
1653 : static REMOTE_TIMELINE_CLIENT_BYTES_STARTED_COUNTER: Lazy<IntCounterVec> =
1654 498 : Lazy::new(|| {
1655 498 : register_int_counter_vec!(
1656 498 : "pageserver_remote_timeline_client_bytes_started",
1657 498 : "Incremented by the number of bytes associated with a remote timeline client operation. \
1658 498 : The increment happens when the operation is scheduled.",
1659 498 : &["tenant_id", "shard_id", "timeline_id", "file_kind", "op_kind"],
1660 498 : )
1661 498 : .expect("failed to define a metric")
1662 498 : });
1663 :
1664 498 : static REMOTE_TIMELINE_CLIENT_BYTES_FINISHED_COUNTER: Lazy<IntCounterVec> = Lazy::new(|| {
1665 498 : register_int_counter_vec!(
1666 498 : "pageserver_remote_timeline_client_bytes_finished",
1667 498 : "Incremented by the number of bytes associated with a remote timeline client operation. \
1668 498 : The increment happens when the operation finishes (regardless of success/failure/shutdown).",
1669 498 : &["tenant_id", "shard_id", "timeline_id", "file_kind", "op_kind"],
1670 498 : )
1671 498 : .expect("failed to define a metric")
1672 498 : });
1673 :
1674 : pub(crate) struct TenantManagerMetrics {
1675 : tenant_slots_attached: UIntGauge,
1676 : tenant_slots_secondary: UIntGauge,
1677 : tenant_slots_inprogress: UIntGauge,
1678 : pub(crate) tenant_slot_writes: IntCounter,
1679 : pub(crate) unexpected_errors: IntCounter,
1680 : }
1681 :
1682 : impl TenantManagerMetrics {
1683 : /// Helpers for tracking slots. Note that these do not track the lifetime of TenantSlot objects
1684 : /// exactly: they track the lifetime of the slots _in the tenant map_.
1685 6 : pub(crate) fn slot_inserted(&self, slot: &TenantSlot) {
1686 6 : match slot {
1687 0 : TenantSlot::Attached(_) => {
1688 0 : self.tenant_slots_attached.inc();
1689 0 : }
1690 0 : TenantSlot::Secondary(_) => {
1691 0 : self.tenant_slots_secondary.inc();
1692 0 : }
1693 6 : TenantSlot::InProgress(_) => {
1694 6 : self.tenant_slots_inprogress.inc();
1695 6 : }
1696 : }
1697 6 : }
1698 :
1699 6 : pub(crate) fn slot_removed(&self, slot: &TenantSlot) {
1700 6 : match slot {
1701 6 : TenantSlot::Attached(_) => {
1702 6 : self.tenant_slots_attached.dec();
1703 6 : }
1704 0 : TenantSlot::Secondary(_) => {
1705 0 : self.tenant_slots_secondary.dec();
1706 0 : }
1707 0 : TenantSlot::InProgress(_) => {
1708 0 : self.tenant_slots_inprogress.dec();
1709 0 : }
1710 : }
1711 6 : }
1712 :
1713 : #[cfg(all(debug_assertions, not(test)))]
1714 0 : pub(crate) fn slots_total(&self) -> u64 {
1715 0 : self.tenant_slots_attached.get()
1716 0 : + self.tenant_slots_secondary.get()
1717 0 : + self.tenant_slots_inprogress.get()
1718 0 : }
1719 : }
1720 :
1721 6 : pub(crate) static TENANT_MANAGER: Lazy<TenantManagerMetrics> = Lazy::new(|| {
1722 6 : let tenant_slots = register_uint_gauge_vec!(
1723 6 : "pageserver_tenant_manager_slots",
1724 6 : "How many slots currently exist, including all attached, secondary and in-progress operations",
1725 6 : &["mode"]
1726 6 : )
1727 6 : .expect("failed to define a metric");
1728 6 : TenantManagerMetrics {
1729 6 : tenant_slots_attached: tenant_slots
1730 6 : .get_metric_with_label_values(&["attached"])
1731 6 : .unwrap(),
1732 6 : tenant_slots_secondary: tenant_slots
1733 6 : .get_metric_with_label_values(&["secondary"])
1734 6 : .unwrap(),
1735 6 : tenant_slots_inprogress: tenant_slots
1736 6 : .get_metric_with_label_values(&["inprogress"])
1737 6 : .unwrap(),
1738 6 : tenant_slot_writes: register_int_counter!(
1739 6 : "pageserver_tenant_manager_slot_writes",
1740 6 : "Writes to a tenant slot, including all of create/attach/detach/delete"
1741 6 : )
1742 6 : .expect("failed to define a metric"),
1743 6 : unexpected_errors: register_int_counter!(
1744 6 : "pageserver_tenant_manager_unexpected_errors_total",
1745 6 : "Number of unexpected conditions encountered: nonzero value indicates a non-fatal bug."
1746 6 : )
1747 6 : .expect("failed to define a metric"),
1748 6 : }
1749 6 : });
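// Usage sketch (illustrative only, not part of this file): code that mutates the tenant
// map keeps these gauges in sync by invoking the hooks around the edit.
fn replace_tenant_slot_sketch(old: &TenantSlot, new: &TenantSlot) {
    TENANT_MANAGER.slot_removed(old);
    TENANT_MANAGER.slot_inserted(new);
    TENANT_MANAGER.tenant_slot_writes.inc();
}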
1750 :
1751 : pub(crate) struct DeletionQueueMetrics {
1752 : pub(crate) keys_submitted: IntCounter,
1753 : pub(crate) keys_dropped: IntCounter,
1754 : pub(crate) keys_executed: IntCounter,
1755 : pub(crate) keys_validated: IntCounter,
1756 : pub(crate) dropped_lsn_updates: IntCounter,
1757 : pub(crate) unexpected_errors: IntCounter,
1758 : pub(crate) remote_errors: IntCounterVec,
1759 : }
1760 82 : pub(crate) static DELETION_QUEUE: Lazy<DeletionQueueMetrics> = Lazy::new(|| {
1761 82 : DeletionQueueMetrics{
1762 82 :
1763 82 : keys_submitted: register_int_counter!(
1764 82 : "pageserver_deletion_queue_submitted_total",
1765 82 : "Number of objects submitted for deletion"
1766 82 : )
1767 82 : .expect("failed to define a metric"),
1768 82 :
1769 82 : keys_dropped: register_int_counter!(
1770 82 : "pageserver_deletion_queue_dropped_total",
1771 82 : "Number of object deletions dropped due to stale generation."
1772 82 : )
1773 82 : .expect("failed to define a metric"),
1774 82 :
1775 82 : keys_executed: register_int_counter!(
1776 82 : "pageserver_deletion_queue_executed_total",
1777 82 : "Number of objects deleted. Only includes objects that we actually deleted, sum with pageserver_deletion_queue_dropped_total for the total number of keys processed to completion"
1778 82 : )
1779 82 : .expect("failed to define a metric"),
1780 82 :
1781 82 : keys_validated: register_int_counter!(
1782 82 : "pageserver_deletion_queue_validated_total",
1783 82 : "Number of keys validated for deletion. Sum with pageserver_deletion_queue_dropped_total for the total number of keys that have passed through the validation stage."
1784 82 : )
1785 82 : .expect("failed to define a metric"),
1786 82 :
1787 82 : dropped_lsn_updates: register_int_counter!(
1788 82 : "pageserver_deletion_queue_dropped_lsn_updates_total",
1789 82 : "Updates to remote_consistent_lsn dropped due to stale generation number."
1790 82 : )
1791 82 : .expect("failed to define a metric"),
1792 82 : unexpected_errors: register_int_counter!(
1793 82 : "pageserver_deletion_queue_unexpected_errors_total",
1794 82 : "Number of unexpected condiions that may stall the queue: any value above zero is unexpected."
1795 82 : )
1796 82 : .expect("failed to define a metric"),
1797 82 : remote_errors: register_int_counter_vec!(
1798 82 : "pageserver_deletion_queue_remote_errors_total",
1799 82 : "Retryable remote I/O errors while executing deletions, for example 503 responses to DeleteObjects",
1800 82 : &["op_kind"],
1801 82 : )
1802 82 : .expect("failed to define a metric")
1803 82 : }
1804 82 : });
1805 :
1806 : pub(crate) struct SecondaryModeMetrics {
1807 : pub(crate) upload_heatmap: IntCounter,
1808 : pub(crate) upload_heatmap_errors: IntCounter,
1809 : pub(crate) upload_heatmap_duration: Histogram,
1810 : pub(crate) download_heatmap: IntCounter,
1811 : pub(crate) download_layer: IntCounter,
1812 : }
1813 0 : pub(crate) static SECONDARY_MODE: Lazy<SecondaryModeMetrics> = Lazy::new(|| {
1814 0 : SecondaryModeMetrics {
1815 0 : upload_heatmap: register_int_counter!(
1816 0 : "pageserver_secondary_upload_heatmap",
1817 0 : "Number of heatmaps written to remote storage by attached tenants"
1818 0 : )
1819 0 : .expect("failed to define a metric"),
1820 0 : upload_heatmap_errors: register_int_counter!(
1821 0 : "pageserver_secondary_upload_heatmap_errors",
1822 0 : "Failures writing heatmap to remote storage"
1823 0 : )
1824 0 : .expect("failed to define a metric"),
1825 0 : upload_heatmap_duration: register_histogram!(
1826 0 : "pageserver_secondary_upload_heatmap_duration",
1827 0 : "Time to build and upload a heatmap, including any waiting inside the remote storage client"
1828 0 : )
1829 0 : .expect("failed to define a metric"),
1830 0 : download_heatmap: register_int_counter!(
1831 0 : "pageserver_secondary_download_heatmap",
1832 0 : "Number of downloads of heatmaps by secondary mode locations, including when it hasn't changed"
1833 0 : )
1834 0 : .expect("failed to define a metric"),
1835 0 : download_layer: register_int_counter!(
1836 0 : "pageserver_secondary_download_layer",
1837 0 : "Number of downloads of layers by secondary mode locations"
1838 0 : )
1839 0 : .expect("failed to define a metric"),
1840 0 : }
1841 0 : });
1842 :
1843 0 : pub(crate) static SECONDARY_RESIDENT_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
1844 0 : register_uint_gauge_vec!(
1845 0 : "pageserver_secondary_resident_physical_size",
1846 0 : "The size of the layer files present in the pageserver's filesystem, for secondary locations.",
1847 0 : &["tenant_id", "shard_id"]
1848 0 : )
1849 0 : .expect("failed to define a metric")
1850 0 : });
1851 :
1852 0 : pub(crate) static NODE_UTILIZATION_SCORE: Lazy<UIntGauge> = Lazy::new(|| {
1853 0 : register_uint_gauge!(
1854 0 : "pageserver_utilization_score",
1855 0 : "The utilization score we report to the storage controller for scheduling, where 0 is empty, 1000000 is full, and anything above is considered overloaded",
1856 0 : )
1857 0 : .expect("failed to define a metric")
1858 0 : });
1859 :
1860 0 : pub(crate) static SECONDARY_HEATMAP_TOTAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
1861 0 : register_uint_gauge_vec!(
1862 0 : "pageserver_secondary_heatmap_total_size",
1863 0 : "The total size in bytes of all layers in the most recently downloaded heatmap.",
1864 0 : &["tenant_id", "shard_id"]
1865 0 : )
1866 0 : .expect("failed to define a metric")
1867 0 : });
1868 :
1869 : #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
1870 : pub enum RemoteOpKind {
1871 : Upload,
1872 : Download,
1873 : Delete,
1874 : }
1875 : impl RemoteOpKind {
1876 40236 : pub fn as_str(&self) -> &'static str {
1877 40236 : match self {
1878 37975 : Self::Upload => "upload",
1879 156 : Self::Download => "download",
1880 2105 : Self::Delete => "delete",
1881 : }
1882 40236 : }
1883 : }
1884 :
1885 : #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
1886 : pub enum RemoteOpFileKind {
1887 : Layer,
1888 : Index,
1889 : }
1890 : impl RemoteOpFileKind {
1891 40236 : pub fn as_str(&self) -> &'static str {
1892 40236 : match self {
1893 27296 : Self::Layer => "layer",
1894 12940 : Self::Index => "index",
1895 : }
1896 40236 : }
1897 : }
1898 :
1899 499 : pub(crate) static REMOTE_OPERATION_TIME: Lazy<HistogramVec> = Lazy::new(|| {
1900 499 : register_histogram_vec!(
1901 499 : "pageserver_remote_operation_seconds",
1902 499 : "Time spent on remote storage operations. \
1903 499 : Grouped by file_kind, op_kind and status. \
1904 499 : Does not account for time spent waiting in remote timeline client's queues.",
1905 499 : &["file_kind", "op_kind", "status"]
1906 499 : )
1907 499 : .expect("failed to define a metric")
1908 499 : });
1909 :
1910 0 : pub(crate) static TENANT_TASK_EVENTS: Lazy<IntCounterVec> = Lazy::new(|| {
1911 0 : register_int_counter_vec!(
1912 0 : "pageserver_tenant_task_events",
1913 0 : "Number of task start/stop/fail events.",
1914 0 : &["event"],
1915 0 : )
1916 0 : .expect("Failed to register tenant_task_events metric")
1917 0 : });
1918 :
1919 : pub struct BackgroundLoopSemaphoreMetrics {
1920 : counters: EnumMap<BackgroundLoopKind, IntCounterPair>,
1921 : durations: EnumMap<BackgroundLoopKind, Counter>,
1922 : }
1923 :
1924 : pub(crate) static BACKGROUND_LOOP_SEMAPHORE: Lazy<BackgroundLoopSemaphoreMetrics> = Lazy::new(
1925 60 : || {
1926 60 : let counters = register_int_counter_pair_vec!(
1927 60 : "pageserver_background_loop_semaphore_wait_start_count",
1928 60 : "Counter for background loop concurrency-limiting semaphore acquire calls started",
1929 60 : "pageserver_background_loop_semaphore_wait_finish_count",
1930 60 : "Counter for background loop concurrency-limiting semaphore acquire calls finished",
1931 60 : &["task"],
1932 60 : )
1933 60 : .unwrap();
1934 60 :
1935 60 : let durations = register_counter_vec!(
1936 60 : "pageserver_background_loop_semaphore_wait_duration_seconds",
1937 60 : "Sum of wall clock time spent waiting on the background loop concurrency-limiting semaphore acquire calls",
1938 60 : &["task"],
1939 60 : )
1940 60 : .unwrap();
1941 60 :
1942 60 : BackgroundLoopSemaphoreMetrics {
1943 540 : counters: enum_map::EnumMap::from_array(std::array::from_fn(|i| {
1944 540 : let kind = <BackgroundLoopKind as enum_map::Enum>::from_usize(i);
1945 540 : counters.with_label_values(&[kind.into()])
1946 540 : })),
1947 540 : durations: enum_map::EnumMap::from_array(std::array::from_fn(|i| {
1948 540 : let kind = <BackgroundLoopKind as enum_map::Enum>::from_usize(i);
1949 540 : durations.with_label_values(&[kind.into()])
1950 540 : })),
1951 60 : }
1952 60 : },
1953 : );
1954 :
1955 : impl BackgroundLoopSemaphoreMetrics {
1956 1092 : pub(crate) fn measure_acquisition(&self, task: BackgroundLoopKind) -> impl Drop + '_ {
1957 : struct Record<'a> {
1958 : metrics: &'a BackgroundLoopSemaphoreMetrics,
1959 : task: BackgroundLoopKind,
1960 : _counter_guard: metrics::IntCounterPairGuard,
1961 : start: Instant,
1962 : }
1963 : impl Drop for Record<'_> {
1964 1092 : fn drop(&mut self) {
1965 1092 : let elapsed = self.start.elapsed().as_secs_f64();
1966 1092 : self.metrics.durations[self.task].inc_by(elapsed);
1967 1092 : }
1968 : }
1969 1092 : Record {
1970 1092 : metrics: self,
1971 1092 : task,
1972 1092 : _counter_guard: self.counters[task].guard(),
1973 1092 : start: Instant::now(),
1974 1092 : }
1975 1092 : }
1976 : }
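// Usage sketch (illustrative only, not part of this file; `acquire` stands in for the
// actual semaphore acquisition future): the guard must be held across the wait so that
// the wait duration and the started/finished counter pair are recorded when it drops.
async fn measure_background_loop_wait_sketch<T>(
    task: BackgroundLoopKind,
    acquire: impl std::future::Future<Output = T>,
) -> T {
    let _guard = BACKGROUND_LOOP_SEMAPHORE.measure_acquisition(task);
    acquire.await
}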
1977 :
1978 0 : pub(crate) static BACKGROUND_LOOP_PERIOD_OVERRUN_COUNT: Lazy<IntCounterVec> = Lazy::new(|| {
1979 0 : register_int_counter_vec!(
1980 0 : "pageserver_background_loop_period_overrun_count",
1981 0 : "Incremented whenever warn_when_period_overrun() logs a warning.",
1982 0 : &["task", "period"],
1983 0 : )
1984 0 : .expect("failed to define a metric")
1985 0 : });
1986 :
1987 : // walreceiver metrics
1988 :
1989 0 : pub(crate) static WALRECEIVER_STARTED_CONNECTIONS: Lazy<IntCounter> = Lazy::new(|| {
1990 0 : register_int_counter!(
1991 0 : "pageserver_walreceiver_started_connections_total",
1992 0 : "Number of started walreceiver connections"
1993 0 : )
1994 0 : .expect("failed to define a metric")
1995 0 : });
1996 :
1997 0 : pub(crate) static WALRECEIVER_ACTIVE_MANAGERS: Lazy<IntGauge> = Lazy::new(|| {
1998 0 : register_int_gauge!(
1999 0 : "pageserver_walreceiver_active_managers",
2000 0 : "Number of active walreceiver managers"
2001 0 : )
2002 0 : .expect("failed to define a metric")
2003 0 : });
2004 :
2005 0 : pub(crate) static WALRECEIVER_SWITCHES: Lazy<IntCounterVec> = Lazy::new(|| {
2006 0 : register_int_counter_vec!(
2007 0 : "pageserver_walreceiver_switches_total",
2008 0 : "Number of walreceiver manager change_connection calls",
2009 0 : &["reason"]
2010 0 : )
2011 0 : .expect("failed to define a metric")
2012 0 : });
2013 :
2014 0 : pub(crate) static WALRECEIVER_BROKER_UPDATES: Lazy<IntCounter> = Lazy::new(|| {
2015 0 : register_int_counter!(
2016 0 : "pageserver_walreceiver_broker_updates_total",
2017 0 : "Number of received broker updates in walreceiver"
2018 0 : )
2019 0 : .expect("failed to define a metric")
2020 0 : });
2021 :
2022 6 : pub(crate) static WALRECEIVER_CANDIDATES_EVENTS: Lazy<IntCounterVec> = Lazy::new(|| {
2023 6 : register_int_counter_vec!(
2024 6 : "pageserver_walreceiver_candidates_events_total",
2025 6 : "Number of walreceiver candidate events",
2026 6 : &["event"]
2027 6 : )
2028 6 : .expect("failed to define a metric")
2029 6 : });
2030 :
2031 : pub(crate) static WALRECEIVER_CANDIDATES_ADDED: Lazy<IntCounter> =
2032 0 : Lazy::new(|| WALRECEIVER_CANDIDATES_EVENTS.with_label_values(&["add"]));
2033 :
2034 : pub(crate) static WALRECEIVER_CANDIDATES_REMOVED: Lazy<IntCounter> =
2035 6 : Lazy::new(|| WALRECEIVER_CANDIDATES_EVENTS.with_label_values(&["remove"]));
2036 :
2037 : // Metrics collected on WAL redo operations
2038 : //
2039 : // We collect the time spent in actual WAL redo ('redo'), and time waiting
2040 : // for access to the postgres process ('wait') since there is only one for
2041 : // each tenant.
2042 :
2043 : /// Time buckets are small because we want to be able to measure the
2044 : /// smallest redo processing times. These buckets allow us to measure down
2045 : /// to 5us, which equates to 200'000 pages/sec, which equates to 1.6GB/sec.
2046 : /// This is much better than the previous 5ms aka 200 pages/sec aka 1.6MB/sec.
2047 : ///
2048 : /// Values up to 1s are recorded because metrics show that we have redo
2049 : /// durations and lock times larger than 0.250s.
2050 : macro_rules! redo_histogram_time_buckets {
2051 : () => {
2052 : vec![
2053 : 0.000_005, 0.000_010, 0.000_025, 0.000_050, 0.000_100, 0.000_250, 0.000_500, 0.001_000,
2054 : 0.002_500, 0.005_000, 0.010_000, 0.025_000, 0.050_000, 0.100_000, 0.250_000, 0.500_000,
2055 : 1.000_000,
2056 : ]
2057 : };
2058 : }
2059 :
2060 : /// While we're at it, also measure the number of records replayed in each
2061 : /// operation. We have a global 'total replayed' counter, but that's not
2062 : /// as useful as 'what is the skew for how many records we replay in one
2063 : /// operation'.
2064 : macro_rules! redo_histogram_count_buckets {
2065 : () => {
2066 : vec![0.0, 1.0, 2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0]
2067 : };
2068 : }
2069 :
2070 : macro_rules! redo_bytes_histogram_count_buckets {
2071 : () => {
2072 : // powers of (2^.5), from 2^4.5 to 2^15 (22 buckets)
2073 : // rounded up to the next multiple of 8 to capture any MAXALIGNed record of that size, too.
2074 : vec![
2075 : 24.0, 32.0, 48.0, 64.0, 96.0, 128.0, 184.0, 256.0, 368.0, 512.0, 728.0, 1024.0, 1456.0,
2076 : 2048.0, 2904.0, 4096.0, 5800.0, 8192.0, 11592.0, 16384.0, 23176.0, 32768.0,
2077 : ]
2078 : };
2079 : }
2080 :
2081 : pub(crate) struct WalIngestMetrics {
2082 : pub(crate) bytes_received: IntCounter,
2083 : pub(crate) records_received: IntCounter,
2084 : pub(crate) records_committed: IntCounter,
2085 : pub(crate) records_filtered: IntCounter,
2086 : }
2087 :
2088 6 : pub(crate) static WAL_INGEST: Lazy<WalIngestMetrics> = Lazy::new(|| WalIngestMetrics {
2089 6 : bytes_received: register_int_counter!(
2090 6 : "pageserver_wal_ingest_bytes_received",
2091 6 : "Bytes of WAL ingested from safekeepers",
2092 6 : )
2093 6 : .unwrap(),
2094 6 : records_received: register_int_counter!(
2095 6 : "pageserver_wal_ingest_records_received",
2096 6 : "Number of WAL records received from safekeepers"
2097 6 : )
2098 6 : .expect("failed to define a metric"),
2099 6 : records_committed: register_int_counter!(
2100 6 : "pageserver_wal_ingest_records_committed",
2101 6 : "Number of WAL records which resulted in writes to pageserver storage"
2102 6 : )
2103 6 : .expect("failed to define a metric"),
2104 6 : records_filtered: register_int_counter!(
2105 6 : "pageserver_wal_ingest_records_filtered",
2106 6 : "Number of WAL records filtered out due to sharding"
2107 6 : )
2108 6 : .expect("failed to define a metric"),
2109 6 : });
2110 :
2111 18 : pub(crate) static WAL_REDO_TIME: Lazy<Histogram> = Lazy::new(|| {
2112 18 : register_histogram!(
2113 18 : "pageserver_wal_redo_seconds",
2114 18 : "Time spent on WAL redo",
2115 18 : redo_histogram_time_buckets!()
2116 18 : )
2117 18 : .expect("failed to define a metric")
2118 18 : });
2119 :
2120 18 : pub(crate) static WAL_REDO_RECORDS_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
2121 18 : register_histogram!(
2122 18 : "pageserver_wal_redo_records_histogram",
2123 18 : "Histogram of number of records replayed per redo in the Postgres WAL redo process",
2124 18 : redo_histogram_count_buckets!(),
2125 18 : )
2126 18 : .expect("failed to define a metric")
2127 18 : });
2128 :
2129 18 : pub(crate) static WAL_REDO_BYTES_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
2130 18 : register_histogram!(
2131 18 : "pageserver_wal_redo_bytes_histogram",
2132 18 : "Histogram of number of records replayed per redo sent to Postgres",
2133 18 : redo_bytes_histogram_count_buckets!(),
2134 18 : )
2135 18 : .expect("failed to define a metric")
2136 18 : });
2137 :
2138 : // FIXME: isn't this already included by WAL_REDO_RECORDS_HISTOGRAM which has _count?
2139 18 : pub(crate) static WAL_REDO_RECORD_COUNTER: Lazy<IntCounter> = Lazy::new(|| {
2140 18 : register_int_counter!(
2141 18 : "pageserver_replayed_wal_records_total",
2142 18 : "Number of WAL records replayed in WAL redo process"
2143 18 : )
2144 18 : .unwrap()
2145 18 : });
2146 :
2147 : #[rustfmt::skip]
2148 24 : pub(crate) static WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
2149 24 : register_histogram!(
2150 24 : "pageserver_wal_redo_process_launch_duration",
2151 24 : "Histogram of the duration of successful WalRedoProcess::launch calls",
2152 24 : vec![
2153 24 : 0.0002, 0.0004, 0.0006, 0.0008, 0.0010,
2154 24 : 0.0020, 0.0040, 0.0060, 0.0080, 0.0100,
2155 24 : 0.0200, 0.0400, 0.0600, 0.0800, 0.1000,
2156 24 : 0.2000, 0.4000, 0.6000, 0.8000, 1.0000,
2157 24 : 1.5000, 2.0000, 2.5000, 3.0000, 4.0000, 10.0000
2158 24 : ],
2159 24 : )
2160 24 : .expect("failed to define a metric")
2161 24 : });
2162 :
2163 : pub(crate) struct WalRedoProcessCounters {
2164 : pub(crate) started: IntCounter,
2165 : pub(crate) killed_by_cause: enum_map::EnumMap<WalRedoKillCause, IntCounter>,
2166 : pub(crate) active_stderr_logger_tasks_started: IntCounter,
2167 : pub(crate) active_stderr_logger_tasks_finished: IntCounter,
2168 : }
2169 :
2170 72 : #[derive(Debug, enum_map::Enum, strum_macros::IntoStaticStr)]
2171 : pub(crate) enum WalRedoKillCause {
2172 : WalRedoProcessDrop,
2173 : NoLeakChildDrop,
2174 : Startup,
2175 : }
2176 :
2177 : impl Default for WalRedoProcessCounters {
2178 24 : fn default() -> Self {
2179 24 : let started = register_int_counter!(
2180 24 : "pageserver_wal_redo_process_started_total",
2181 24 : "Number of WAL redo processes started",
2182 24 : )
2183 24 : .unwrap();
2184 24 :
2185 24 : let killed = register_int_counter_vec!(
2186 24 : "pageserver_wal_redo_process_stopped_total",
2187 24 : "Number of WAL redo processes stopped",
2188 24 : &["cause"],
2189 24 : )
2190 24 : .unwrap();
2191 24 :
2192 24 : let active_stderr_logger_tasks_started = register_int_counter!(
2193 24 : "pageserver_walredo_stderr_logger_tasks_started_total",
2194 24 : "Number of active walredo stderr logger tasks that have started",
2195 24 : )
2196 24 : .unwrap();
2197 24 :
2198 24 : let active_stderr_logger_tasks_finished = register_int_counter!(
2199 24 : "pageserver_walredo_stderr_logger_tasks_finished_total",
2200 24 : "Number of active walredo stderr logger tasks that have finished",
2201 24 : )
2202 24 : .unwrap();
2203 24 :
2204 24 : Self {
2205 24 : started,
2206 72 : killed_by_cause: EnumMap::from_array(std::array::from_fn(|i| {
2207 72 : let cause = <WalRedoKillCause as enum_map::Enum>::from_usize(i);
2208 72 : let cause_str: &'static str = cause.into();
2209 72 : killed.with_label_values(&[cause_str])
2210 72 : })),
2211 24 : active_stderr_logger_tasks_started,
2212 24 : active_stderr_logger_tasks_finished,
2213 24 : }
2214 24 : }
2215 : }
2216 :
2217 : pub(crate) static WAL_REDO_PROCESS_COUNTERS: Lazy<WalRedoProcessCounters> =
2218 : Lazy::new(WalRedoProcessCounters::default);
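// Usage sketch (illustrative only, not part of this file): the walredo code records
// process lifecycle events against the lazily-initialized global counters.
fn record_walredo_events_sketch() {
    WAL_REDO_PROCESS_COUNTERS.started.inc();
    WAL_REDO_PROCESS_COUNTERS.killed_by_cause[WalRedoKillCause::WalRedoProcessDrop].inc();
}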
2219 :
2220 : /// Similar to `prometheus::HistogramTimer` but does not record on drop.
2221 : pub(crate) struct StorageTimeMetricsTimer {
2222 : metrics: StorageTimeMetrics,
2223 : start: Instant,
2224 : }
2225 :
2226 : impl StorageTimeMetricsTimer {
2227 14608 : fn new(metrics: StorageTimeMetrics) -> Self {
2228 14608 : Self {
2229 14608 : metrics,
2230 14608 : start: Instant::now(),
2231 14608 : }
2232 14608 : }
2233 :
2234 : /// Record the time from creation to now.
2235 11211 : pub fn stop_and_record(self) {
2236 11211 : let duration = self.start.elapsed().as_secs_f64();
2237 11211 : self.metrics.timeline_sum.inc_by(duration);
2238 11211 : self.metrics.timeline_count.inc();
2239 11211 : self.metrics.global_histogram.observe(duration);
2240 11211 : }
2241 :
2242 : /// Turns this timer into one that always records on drop -- usually this means recording
2243 : /// even when an early `?` return path is taken in a function.
2244 2262 : pub(crate) fn record_on_drop(self) -> AlwaysRecordingStorageTimeMetricsTimer {
2245 2262 : AlwaysRecordingStorageTimeMetricsTimer(Some(self))
2246 2262 : }
2247 : }
2248 :
2249 : pub(crate) struct AlwaysRecordingStorageTimeMetricsTimer(Option<StorageTimeMetricsTimer>);
2250 :
2251 : impl Drop for AlwaysRecordingStorageTimeMetricsTimer {
2252 2262 : fn drop(&mut self) {
2253 2262 : if let Some(inner) = self.0.take() {
2254 2262 : inner.stop_and_record();
2255 2262 : }
2256 2262 : }
2257 : }
2258 :
2259 : /// Timing facilities for a globally histogrammed metric, backed by a per-tenant and
2260 : /// per-timeline total sum and count.
2261 : #[derive(Clone, Debug)]
2262 : pub(crate) struct StorageTimeMetrics {
2263 : /// Sum of f64 seconds, per operation, tenant_id and timeline_id
2264 : timeline_sum: Counter,
2265 : /// Number of operations, per operation, tenant_id and timeline_id
2266 : timeline_count: IntCounter,
2267 : /// Global histogram having only the "operation" label.
2268 : global_histogram: Histogram,
2269 : }
2270 :
2271 : impl StorageTimeMetrics {
2272 9984 : pub fn new(
2273 9984 : operation: StorageTimeOperation,
2274 9984 : tenant_id: &str,
2275 9984 : shard_id: &str,
2276 9984 : timeline_id: &str,
2277 9984 : ) -> Self {
2278 9984 : let operation: &'static str = operation.into();
2279 9984 :
2280 9984 : let timeline_sum = STORAGE_TIME_SUM_PER_TIMELINE
2281 9984 : .get_metric_with_label_values(&[operation, tenant_id, shard_id, timeline_id])
2282 9984 : .unwrap();
2283 9984 : let timeline_count = STORAGE_TIME_COUNT_PER_TIMELINE
2284 9984 : .get_metric_with_label_values(&[operation, tenant_id, shard_id, timeline_id])
2285 9984 : .unwrap();
2286 9984 : let global_histogram = STORAGE_TIME_GLOBAL
2287 9984 : .get_metric_with_label_values(&[operation])
2288 9984 : .unwrap();
2289 9984 :
2290 9984 : StorageTimeMetrics {
2291 9984 : timeline_sum,
2292 9984 : timeline_count,
2293 9984 : global_histogram,
2294 9984 : }
2295 9984 : }
2296 :
2297 : /// Starts timing a new operation.
2298 : ///
2299 : /// Note: unlike `prometheus::HistogramTimer` the returned timer does not record on drop.
2300 14608 : pub fn start_timer(&self) -> StorageTimeMetricsTimer {
2301 14608 : StorageTimeMetricsTimer::new(self.clone())
2302 14608 : }
2303 : }
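// Usage sketch (illustrative only, not part of this file): the two recording styles,
// given the TimelineMetrics struct defined below.
fn time_storage_ops_sketch(metrics: &TimelineMetrics) {
    // Record only if the operation runs to completion.
    let timer = metrics.compact_time_histo.start_timer();
    // ... compaction work would happen here ...
    timer.stop_and_record();

    // Record even if the surrounding function returns early via `?`.
    let _timer = metrics.flush_time_histo.start_timer().record_on_drop();
}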
2304 :
2305 : #[derive(Debug)]
2306 : pub(crate) struct TimelineMetrics {
2307 : tenant_id: String,
2308 : shard_id: String,
2309 : timeline_id: String,
2310 : pub flush_time_histo: StorageTimeMetrics,
2311 : pub compact_time_histo: StorageTimeMetrics,
2312 : pub create_images_time_histo: StorageTimeMetrics,
2313 : pub logical_size_histo: StorageTimeMetrics,
2314 : pub imitate_logical_size_histo: StorageTimeMetrics,
2315 : pub load_layer_map_histo: StorageTimeMetrics,
2316 : pub garbage_collect_histo: StorageTimeMetrics,
2317 : pub find_gc_cutoffs_histo: StorageTimeMetrics,
2318 : pub last_record_gauge: IntGauge,
2319 : pub pitr_history_size: UIntGauge,
2320 : pub archival_size: UIntGauge,
2321 : pub(crate) layer_size_image: UIntGauge,
2322 : pub(crate) layer_count_image: UIntGauge,
2323 : pub(crate) layer_size_delta: UIntGauge,
2324 : pub(crate) layer_count_delta: UIntGauge,
2325 : pub standby_horizon_gauge: IntGauge,
2326 : pub resident_physical_size_gauge: UIntGauge,
2327 : pub visible_physical_size_gauge: UIntGauge,
2328 : /// copy of LayeredTimeline.current_logical_size
2329 : pub current_logical_size_gauge: UIntGauge,
2330 : pub aux_file_size_gauge: IntGauge,
2331 : pub directory_entries_count_gauge: Lazy<UIntGauge, Box<dyn Send + Fn() -> UIntGauge>>,
2332 : pub evictions: IntCounter,
2333 : pub evictions_with_low_residence_duration: std::sync::RwLock<EvictionsWithLowResidenceDuration>,
2334 : /// Number of valid LSN leases.
2335 : pub valid_lsn_lease_count_gauge: UIntGauge,
2336 : shutdown: std::sync::atomic::AtomicBool,
2337 : }
2338 :
2339 : impl TimelineMetrics {
2340 1248 : pub fn new(
2341 1248 : tenant_shard_id: &TenantShardId,
2342 1248 : timeline_id_raw: &TimelineId,
2343 1248 : evictions_with_low_residence_duration_builder: EvictionsWithLowResidenceDurationBuilder,
2344 1248 : ) -> Self {
2345 1248 : let tenant_id = tenant_shard_id.tenant_id.to_string();
2346 1248 : let shard_id = format!("{}", tenant_shard_id.shard_slug());
2347 1248 : let timeline_id = timeline_id_raw.to_string();
2348 1248 : let flush_time_histo = StorageTimeMetrics::new(
2349 1248 : StorageTimeOperation::LayerFlush,
2350 1248 : &tenant_id,
2351 1248 : &shard_id,
2352 1248 : &timeline_id,
2353 1248 : );
2354 1248 : let compact_time_histo = StorageTimeMetrics::new(
2355 1248 : StorageTimeOperation::Compact,
2356 1248 : &tenant_id,
2357 1248 : &shard_id,
2358 1248 : &timeline_id,
2359 1248 : );
2360 1248 : let create_images_time_histo = StorageTimeMetrics::new(
2361 1248 : StorageTimeOperation::CreateImages,
2362 1248 : &tenant_id,
2363 1248 : &shard_id,
2364 1248 : &timeline_id,
2365 1248 : );
2366 1248 : let logical_size_histo = StorageTimeMetrics::new(
2367 1248 : StorageTimeOperation::LogicalSize,
2368 1248 : &tenant_id,
2369 1248 : &shard_id,
2370 1248 : &timeline_id,
2371 1248 : );
2372 1248 : let imitate_logical_size_histo = StorageTimeMetrics::new(
2373 1248 : StorageTimeOperation::ImitateLogicalSize,
2374 1248 : &tenant_id,
2375 1248 : &shard_id,
2376 1248 : &timeline_id,
2377 1248 : );
2378 1248 : let load_layer_map_histo = StorageTimeMetrics::new(
2379 1248 : StorageTimeOperation::LoadLayerMap,
2380 1248 : &tenant_id,
2381 1248 : &shard_id,
2382 1248 : &timeline_id,
2383 1248 : );
2384 1248 : let garbage_collect_histo = StorageTimeMetrics::new(
2385 1248 : StorageTimeOperation::Gc,
2386 1248 : &tenant_id,
2387 1248 : &shard_id,
2388 1248 : &timeline_id,
2389 1248 : );
2390 1248 : let find_gc_cutoffs_histo = StorageTimeMetrics::new(
2391 1248 : StorageTimeOperation::FindGcCutoffs,
2392 1248 : &tenant_id,
2393 1248 : &shard_id,
2394 1248 : &timeline_id,
2395 1248 : );
2396 1248 : let last_record_gauge = LAST_RECORD_LSN
2397 1248 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2398 1248 : .unwrap();
2399 1248 :
2400 1248 : let pitr_history_size = PITR_HISTORY_SIZE
2401 1248 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2402 1248 : .unwrap();
2403 1248 :
2404 1248 : let archival_size = TIMELINE_ARCHIVE_SIZE
2405 1248 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2406 1248 : .unwrap();
2407 1248 :
2408 1248 : let layer_size_image = TIMELINE_LAYER_SIZE
2409 1248 : .get_metric_with_label_values(&[
2410 1248 : &tenant_id,
2411 1248 : &shard_id,
2412 1248 : &timeline_id,
2413 1248 : MetricLayerKind::Image.into(),
2414 1248 : ])
2415 1248 : .unwrap();
2416 1248 :
2417 1248 : let layer_count_image = TIMELINE_LAYER_COUNT
2418 1248 : .get_metric_with_label_values(&[
2419 1248 : &tenant_id,
2420 1248 : &shard_id,
2421 1248 : &timeline_id,
2422 1248 : MetricLayerKind::Image.into(),
2423 1248 : ])
2424 1248 : .unwrap();
2425 1248 :
2426 1248 : let layer_size_delta = TIMELINE_LAYER_SIZE
2427 1248 : .get_metric_with_label_values(&[
2428 1248 : &tenant_id,
2429 1248 : &shard_id,
2430 1248 : &timeline_id,
2431 1248 : MetricLayerKind::Delta.into(),
2432 1248 : ])
2433 1248 : .unwrap();
2434 1248 :
2435 1248 : let layer_count_delta = TIMELINE_LAYER_COUNT
2436 1248 : .get_metric_with_label_values(&[
2437 1248 : &tenant_id,
2438 1248 : &shard_id,
2439 1248 : &timeline_id,
2440 1248 : MetricLayerKind::Delta.into(),
2441 1248 : ])
2442 1248 : .unwrap();
2443 1248 :
2444 1248 : let standby_horizon_gauge = STANDBY_HORIZON
2445 1248 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2446 1248 : .unwrap();
2447 1248 : let resident_physical_size_gauge = RESIDENT_PHYSICAL_SIZE
2448 1248 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2449 1248 : .unwrap();
2450 1248 : let visible_physical_size_gauge = VISIBLE_PHYSICAL_SIZE
2451 1248 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2452 1248 : .unwrap();
2453 1248 : // TODO: we shouldn't expose this metric
2454 1248 : let current_logical_size_gauge = CURRENT_LOGICAL_SIZE
2455 1248 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2456 1248 : .unwrap();
2457 1248 : let aux_file_size_gauge = AUX_FILE_SIZE
2458 1248 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2459 1248 : .unwrap();
2460 1248 : // TODO use impl Trait syntax here once we have ability to use it: https://github.com/rust-lang/rust/issues/63065
2461 1248 : let directory_entries_count_gauge_closure = {
2462 1248 : let tenant_shard_id = *tenant_shard_id;
2463 1248 : let timeline_id_raw = *timeline_id_raw;
2464 0 : move || {
2465 0 : let tenant_id = tenant_shard_id.tenant_id.to_string();
2466 0 : let shard_id = format!("{}", tenant_shard_id.shard_slug());
2467 0 : let timeline_id = timeline_id_raw.to_string();
2468 0 : let gauge: UIntGauge = DIRECTORY_ENTRIES_COUNT
2469 0 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2470 0 : .unwrap();
2471 0 : gauge
2472 0 : }
2473 : };
2474 1248 : let directory_entries_count_gauge: Lazy<UIntGauge, Box<dyn Send + Fn() -> UIntGauge>> =
2475 1248 : Lazy::new(Box::new(directory_entries_count_gauge_closure));
2476 1248 : let evictions = EVICTIONS
2477 1248 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2478 1248 : .unwrap();
2479 1248 : let evictions_with_low_residence_duration = evictions_with_low_residence_duration_builder
2480 1248 : .build(&tenant_id, &shard_id, &timeline_id);
2481 1248 :
2482 1248 : let valid_lsn_lease_count_gauge = VALID_LSN_LEASE_COUNT
2483 1248 : .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
2484 1248 : .unwrap();
2485 1248 :
2486 1248 : TimelineMetrics {
2487 1248 : tenant_id,
2488 1248 : shard_id,
2489 1248 : timeline_id,
2490 1248 : flush_time_histo,
2491 1248 : compact_time_histo,
2492 1248 : create_images_time_histo,
2493 1248 : logical_size_histo,
2494 1248 : imitate_logical_size_histo,
2495 1248 : garbage_collect_histo,
2496 1248 : find_gc_cutoffs_histo,
2497 1248 : load_layer_map_histo,
2498 1248 : last_record_gauge,
2499 1248 : pitr_history_size,
2500 1248 : archival_size,
2501 1248 : layer_size_image,
2502 1248 : layer_count_image,
2503 1248 : layer_size_delta,
2504 1248 : layer_count_delta,
2505 1248 : standby_horizon_gauge,
2506 1248 : resident_physical_size_gauge,
2507 1248 : visible_physical_size_gauge,
2508 1248 : current_logical_size_gauge,
2509 1248 : aux_file_size_gauge,
2510 1248 : directory_entries_count_gauge,
2511 1248 : evictions,
2512 1248 : evictions_with_low_residence_duration: std::sync::RwLock::new(
2513 1248 : evictions_with_low_residence_duration,
2514 1248 : ),
2515 1248 : valid_lsn_lease_count_gauge,
2516 1248 : shutdown: std::sync::atomic::AtomicBool::default(),
2517 1248 : }
2518 1248 : }
2519 :
2520 4536 : pub(crate) fn record_new_file_metrics(&self, sz: u64) {
2521 4536 : self.resident_physical_size_add(sz);
2522 4536 : }
2523 :
2524 1434 : pub(crate) fn resident_physical_size_sub(&self, sz: u64) {
2525 1434 : self.resident_physical_size_gauge.sub(sz);
2526 1434 : crate::metrics::RESIDENT_PHYSICAL_SIZE_GLOBAL.sub(sz);
2527 1434 : }
2528 :
2529 4626 : pub(crate) fn resident_physical_size_add(&self, sz: u64) {
2530 4626 : self.resident_physical_size_gauge.add(sz);
2531 4626 : crate::metrics::RESIDENT_PHYSICAL_SIZE_GLOBAL.add(sz);
2532 4626 : }
2533 :
2534 24 : pub(crate) fn resident_physical_size_get(&self) -> u64 {
2535 24 : self.resident_physical_size_gauge.get()
2536 24 : }
2537 :
2538 24 : pub(crate) fn shutdown(&self) {
2539 24 : let was_shutdown = self
2540 24 : .shutdown
2541 24 : .swap(true, std::sync::atomic::Ordering::Relaxed);
2542 24 :
2543 24 : if was_shutdown {
2544 : // this happens on tenant deletion because tenant first shuts down timelines, then
2545 : // invokes timeline deletion which first shuts down the timeline again.
2546 : // TODO: this can be removed once https://github.com/neondatabase/neon/issues/5080
2547 0 : return;
2548 24 : }
2549 24 :
2550 24 : let tenant_id = &self.tenant_id;
2551 24 : let timeline_id = &self.timeline_id;
2552 24 : let shard_id = &self.shard_id;
2553 24 : let _ = LAST_RECORD_LSN.remove_label_values(&[tenant_id, shard_id, timeline_id]);
2554 24 : let _ = STANDBY_HORIZON.remove_label_values(&[tenant_id, shard_id, timeline_id]);
2555 24 : {
2556 24 : RESIDENT_PHYSICAL_SIZE_GLOBAL.sub(self.resident_physical_size_get());
2557 24 : let _ = RESIDENT_PHYSICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
2558 24 : }
2559 24 : let _ = VISIBLE_PHYSICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
2560 24 : let _ = CURRENT_LOGICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
2561 24 : if let Some(metric) = Lazy::get(&DIRECTORY_ENTRIES_COUNT) {
2562 0 : let _ = metric.remove_label_values(&[tenant_id, shard_id, timeline_id]);
2563 24 : }
2564 :
2565 24 : let _ = TIMELINE_ARCHIVE_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
2566 24 : let _ = PITR_HISTORY_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
2567 24 :
2568 24 : let _ = TIMELINE_LAYER_SIZE.remove_label_values(&[
2569 24 : tenant_id,
2570 24 : shard_id,
2571 24 : timeline_id,
2572 24 : MetricLayerKind::Image.into(),
2573 24 : ]);
2574 24 : let _ = TIMELINE_LAYER_COUNT.remove_label_values(&[
2575 24 : tenant_id,
2576 24 : shard_id,
2577 24 : timeline_id,
2578 24 : MetricLayerKind::Image.into(),
2579 24 : ]);
2580 24 : let _ = TIMELINE_LAYER_SIZE.remove_label_values(&[
2581 24 : tenant_id,
2582 24 : shard_id,
2583 24 : timeline_id,
2584 24 : MetricLayerKind::Delta.into(),
2585 24 : ]);
2586 24 : let _ = TIMELINE_LAYER_COUNT.remove_label_values(&[
2587 24 : tenant_id,
2588 24 : shard_id,
2589 24 : timeline_id,
2590 24 : MetricLayerKind::Delta.into(),
2591 24 : ]);
2592 24 :
2593 24 : let _ = EVICTIONS.remove_label_values(&[tenant_id, shard_id, timeline_id]);
2594 24 : let _ = AUX_FILE_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
2595 24 : let _ = VALID_LSN_LEASE_COUNT.remove_label_values(&[tenant_id, shard_id, timeline_id]);
2596 24 :
2597 24 : self.evictions_with_low_residence_duration
2598 24 : .write()
2599 24 : .unwrap()
2600 24 : .remove(tenant_id, shard_id, timeline_id);
2601 :
2602 : // The following metrics are born outside of the TimelineMetrics lifecycle but still
2603 : // removed at the end of it. The idea is to have the metrics outlive the
2604 : // entity during which they're observed, e.g., the smgr metrics shall
2605 : // outlive an individual smgr connection, but not the timeline.
2606 :
2607 216 : for op in StorageTimeOperation::VARIANTS {
2608 192 : let _ = STORAGE_TIME_SUM_PER_TIMELINE.remove_label_values(&[
2609 192 : op,
2610 192 : tenant_id,
2611 192 : shard_id,
2612 192 : timeline_id,
2613 192 : ]);
2614 192 : let _ = STORAGE_TIME_COUNT_PER_TIMELINE.remove_label_values(&[
2615 192 : op,
2616 192 : tenant_id,
2617 192 : shard_id,
2618 192 : timeline_id,
2619 192 : ]);
2620 192 : }
2621 :
2622 72 : for op in STORAGE_IO_SIZE_OPERATIONS {
2623 48 : let _ = STORAGE_IO_SIZE.remove_label_values(&[op, tenant_id, shard_id, timeline_id]);
2624 48 : }
2625 :
2626 24 : let _ = SMGR_QUERY_STARTED_PER_TENANT_TIMELINE.remove_label_values(&[
2627 24 : SmgrQueryType::GetPageAtLsn.into(),
2628 24 : tenant_id,
2629 24 : shard_id,
2630 24 : timeline_id,
2631 24 : ]);
2632 24 : let _ = SMGR_QUERY_TIME_PER_TENANT_TIMELINE.remove_label_values(&[
2633 24 : SmgrQueryType::GetPageAtLsn.into(),
2634 24 : tenant_id,
2635 24 : shard_id,
2636 24 : timeline_id,
2637 24 : ]);
2638 24 : }
2639 : }
2640 :
2641 18 : pub(crate) fn remove_tenant_metrics(tenant_shard_id: &TenantShardId) {
2642 18 : // Only shard zero deals in synthetic sizes
2643 18 : if tenant_shard_id.is_shard_zero() {
2644 18 : let tid = tenant_shard_id.tenant_id.to_string();
2645 18 : let _ = TENANT_SYNTHETIC_SIZE_METRIC.remove_label_values(&[&tid]);
2646 18 : }
2647 :
2648 18 : tenant_throttling::remove_tenant_metrics(tenant_shard_id);
2649 18 :
2650 18 : // we leave the BROKEN_TENANTS_SET entry if any
2651 18 : }
2652 :
2653 : use futures::Future;
2654 : use pin_project_lite::pin_project;
2655 : use std::collections::HashMap;
2656 : use std::num::NonZeroUsize;
2657 : use std::pin::Pin;
2658 : use std::sync::atomic::AtomicU64;
2659 : use std::sync::{Arc, Mutex};
2660 : use std::task::{Context, Poll};
2661 : use std::time::{Duration, Instant};
2662 :
2663 : use crate::context::{PageContentKind, RequestContext};
2664 : use crate::task_mgr::TaskKind;
2665 : use crate::tenant::mgr::TenantSlot;
2666 : use crate::tenant::tasks::BackgroundLoopKind;
2667 :
2668 : /// Maintain a per timeline gauge in addition to the global gauge.
2669 : pub(crate) struct PerTimelineRemotePhysicalSizeGauge {
2670 : last_set: AtomicU64,
2671 : gauge: UIntGauge,
2672 : }
2673 :
2674 : impl PerTimelineRemotePhysicalSizeGauge {
2675 1278 : fn new(per_timeline_gauge: UIntGauge) -> Self {
2676 1278 : Self {
2677 1278 : last_set: AtomicU64::new(0),
2678 1278 : gauge: per_timeline_gauge,
2679 1278 : }
2680 1278 : }
2681 5471 : pub(crate) fn set(&self, sz: u64) {
2682 5471 : self.gauge.set(sz);
2683 5471 : let prev = self.last_set.swap(sz, std::sync::atomic::Ordering::Relaxed);
2684 5471 : if sz < prev {
2685 78 : REMOTE_PHYSICAL_SIZE_GLOBAL.sub(prev - sz);
2686 5393 : } else {
2687 5393 : REMOTE_PHYSICAL_SIZE_GLOBAL.add(sz - prev);
2688 5393 : };
2689 5471 : }
2690 6 : pub(crate) fn get(&self) -> u64 {
2691 6 : self.gauge.get()
2692 6 : }
2693 : }
2694 :
2695 : impl Drop for PerTimelineRemotePhysicalSizeGauge {
2696 54 : fn drop(&mut self) {
2697 54 : REMOTE_PHYSICAL_SIZE_GLOBAL.sub(self.last_set.load(std::sync::atomic::Ordering::Relaxed));
2698 54 : }
2699 : }
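// Usage sketch (illustrative only, not part of this file): set() keeps the per-timeline
// gauge and the global REMOTE_PHYSICAL_SIZE_GLOBAL gauge consistent by applying only the
// delta since the previous call; the Drop impl removes this timeline's contribution.
fn update_remote_physical_size_sketch(metrics: &RemoteTimelineClientMetrics, new_size: u64) {
    metrics.remote_physical_size_gauge.set(new_size);
}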
2700 :
2701 : pub(crate) struct RemoteTimelineClientMetrics {
2702 : tenant_id: String,
2703 : shard_id: String,
2704 : timeline_id: String,
2705 : pub(crate) remote_physical_size_gauge: PerTimelineRemotePhysicalSizeGauge,
2706 : calls: Mutex<HashMap<(&'static str, &'static str), IntCounterPair>>,
2707 : bytes_started_counter: Mutex<HashMap<(&'static str, &'static str), IntCounter>>,
2708 : bytes_finished_counter: Mutex<HashMap<(&'static str, &'static str), IntCounter>>,
2709 : }
2710 :
2711 : impl RemoteTimelineClientMetrics {
2712 1278 : pub fn new(tenant_shard_id: &TenantShardId, timeline_id: &TimelineId) -> Self {
2713 1278 : let tenant_id_str = tenant_shard_id.tenant_id.to_string();
2714 1278 : let shard_id_str = format!("{}", tenant_shard_id.shard_slug());
2715 1278 : let timeline_id_str = timeline_id.to_string();
2716 1278 :
2717 1278 : let remote_physical_size_gauge = PerTimelineRemotePhysicalSizeGauge::new(
2718 1278 : REMOTE_PHYSICAL_SIZE
2719 1278 : .get_metric_with_label_values(&[&tenant_id_str, &shard_id_str, &timeline_id_str])
2720 1278 : .unwrap(),
2721 1278 : );
2722 1278 :
2723 1278 : RemoteTimelineClientMetrics {
2724 1278 : tenant_id: tenant_id_str,
2725 1278 : shard_id: shard_id_str,
2726 1278 : timeline_id: timeline_id_str,
2727 1278 : calls: Mutex::new(HashMap::default()),
2728 1278 : bytes_started_counter: Mutex::new(HashMap::default()),
2729 1278 : bytes_finished_counter: Mutex::new(HashMap::default()),
2730 1278 : remote_physical_size_gauge,
2731 1278 : }
2732 1278 : }
2733 :
2734 8114 : pub fn remote_operation_time(
2735 8114 : &self,
2736 8114 : file_kind: &RemoteOpFileKind,
2737 8114 : op_kind: &RemoteOpKind,
2738 8114 : status: &'static str,
2739 8114 : ) -> Histogram {
2740 8114 : let key = (file_kind.as_str(), op_kind.as_str(), status);
2741 8114 : REMOTE_OPERATION_TIME
2742 8114 : .get_metric_with_label_values(&[key.0, key.1, key.2])
2743 8114 : .unwrap()
2744 8114 : }
2745 :
2746 19153 : fn calls_counter_pair(
2747 19153 : &self,
2748 19153 : file_kind: &RemoteOpFileKind,
2749 19153 : op_kind: &RemoteOpKind,
2750 19153 : ) -> IntCounterPair {
2751 19153 : let mut guard = self.calls.lock().unwrap();
2752 19153 : let key = (file_kind.as_str(), op_kind.as_str());
2753 19153 : let metric = guard.entry(key).or_insert_with(move || {
2754 2254 : REMOTE_TIMELINE_CLIENT_CALLS
2755 2254 : .get_metric_with_label_values(&[
2756 2254 : &self.tenant_id,
2757 2254 : &self.shard_id,
2758 2254 : &self.timeline_id,
2759 2254 : key.0,
2760 2254 : key.1,
2761 2254 : ])
2762 2254 : .unwrap()
2763 19153 : });
2764 19153 : metric.clone()
2765 19153 : }
2766 :
2767 4560 : fn bytes_started_counter(
2768 4560 : &self,
2769 4560 : file_kind: &RemoteOpFileKind,
2770 4560 : op_kind: &RemoteOpKind,
2771 4560 : ) -> IntCounter {
2772 4560 : let mut guard = self.bytes_started_counter.lock().unwrap();
2773 4560 : let key = (file_kind.as_str(), op_kind.as_str());
2774 4560 : let metric = guard.entry(key).or_insert_with(move || {
2775 870 : REMOTE_TIMELINE_CLIENT_BYTES_STARTED_COUNTER
2776 870 : .get_metric_with_label_values(&[
2777 870 : &self.tenant_id,
2778 870 : &self.shard_id,
2779 870 : &self.timeline_id,
2780 870 : key.0,
2781 870 : key.1,
2782 870 : ])
2783 870 : .unwrap()
2784 4560 : });
2785 4560 : metric.clone()
2786 4560 : }
2787 :
2788 8373 : fn bytes_finished_counter(
2789 8373 : &self,
2790 8373 : file_kind: &RemoteOpFileKind,
2791 8373 : op_kind: &RemoteOpKind,
2792 8373 : ) -> IntCounter {
2793 8373 : let mut guard = self.bytes_finished_counter.lock().unwrap();
2794 8373 : let key = (file_kind.as_str(), op_kind.as_str());
2795 8373 : let metric = guard.entry(key).or_insert_with(move || {
2796 870 : REMOTE_TIMELINE_CLIENT_BYTES_FINISHED_COUNTER
2797 870 : .get_metric_with_label_values(&[
2798 870 : &self.tenant_id,
2799 870 : &self.shard_id,
2800 870 : &self.timeline_id,
2801 870 : key.0,
2802 870 : key.1,
2803 870 : ])
2804 870 : .unwrap()
2805 8373 : });
2806 8373 : metric.clone()
2807 8373 : }
2808 : }
2809 :
2810 : #[cfg(test)]
2811 : impl RemoteTimelineClientMetrics {
2812 18 : pub fn get_bytes_started_counter_value(
2813 18 : &self,
2814 18 : file_kind: &RemoteOpFileKind,
2815 18 : op_kind: &RemoteOpKind,
2816 18 : ) -> Option<u64> {
2817 18 : let guard = self.bytes_started_counter.lock().unwrap();
2818 18 : let key = (file_kind.as_str(), op_kind.as_str());
2819 18 : guard.get(&key).map(|counter| counter.get())
2820 18 : }
2821 :
2822 18 : pub fn get_bytes_finished_counter_value(
2823 18 : &self,
2824 18 : file_kind: &RemoteOpFileKind,
2825 18 : op_kind: &RemoteOpKind,
2826 18 : ) -> Option<u64> {
2827 18 : let guard = self.bytes_finished_counter.lock().unwrap();
2828 18 : let key = (file_kind.as_str(), op_kind.as_str());
2829 18 : guard.get(&key).map(|counter| counter.get())
2830 18 : }
2831 : }
2832 :
2833 : /// See [`RemoteTimelineClientMetrics::call_begin`].
2834 : #[must_use]
2835 : pub(crate) struct RemoteTimelineClientCallMetricGuard {
2836 : /// Decremented on drop.
2837 : calls_counter_pair: Option<IntCounterPair>,
2838 : /// If Some(), this references the bytes_finished metric, and we increment it by the given `u64` on drop.
2839 : bytes_finished: Option<(IntCounter, u64)>,
2840 : }
2841 :
2842 : impl RemoteTimelineClientCallMetricGuard {
2843 : /// Consume this guard object without performing the metric updates it would do on `drop()`.
2844 : /// The caller vouches to do the metric updates manually.
2845 10326 : pub fn will_decrement_manually(mut self) {
2846 10326 : let RemoteTimelineClientCallMetricGuard {
2847 10326 : calls_counter_pair,
2848 10326 : bytes_finished,
2849 10326 : } = &mut self;
2850 10326 : calls_counter_pair.take();
2851 10326 : bytes_finished.take();
2852 10326 : }
2853 : }
2854 :
2855 : impl Drop for RemoteTimelineClientCallMetricGuard {
2856 10404 : fn drop(&mut self) {
2857 10404 : let RemoteTimelineClientCallMetricGuard {
2858 10404 : calls_counter_pair,
2859 10404 : bytes_finished,
2860 10404 : } = self;
2861 10404 : if let Some(guard) = calls_counter_pair.take() {
2862 78 : guard.dec();
2863 10326 : }
2864 10404 : if let Some((bytes_finished_metric, value)) = bytes_finished {
2865 0 : bytes_finished_metric.inc_by(*value);
2866 10404 : }
2867 10404 : }
2868 : }
2869 :
2870 : /// The enum variants communicate to the [`RemoteTimelineClientMetrics`] whether to
2871 : /// track the byte size of this call in applicable metric(s).
2872 : pub(crate) enum RemoteTimelineClientMetricsCallTrackSize {
2873 : /// Do not account for this call's byte size in any metrics.
2874 : /// The `reason` field is there to make the call sites self-documenting
2875 : /// about why they don't need the metric.
2876 : DontTrackSize { reason: &'static str },
2877 : /// Track the byte size of the call in applicable metric(s).
2878 : Bytes(u64),
2879 : }
2880 :
2881 : impl RemoteTimelineClientMetrics {
2882 : /// Update the metrics that change when a call to the remote timeline client instance starts.
2883 : ///
2884 : /// Drop the returned guard object once the operation is finished to update the corresponding metrics that track completions.
2885 : /// Or, use [`RemoteTimelineClientCallMetricGuard::will_decrement_manually`] and [`call_end`](Self::call_end) if that
2886 : /// is more suitable.
2887 : /// Never do both.
2888 10404 : pub(crate) fn call_begin(
2889 10404 : &self,
2890 10404 : file_kind: &RemoteOpFileKind,
2891 10404 : op_kind: &RemoteOpKind,
2892 10404 : size: RemoteTimelineClientMetricsCallTrackSize,
2893 10404 : ) -> RemoteTimelineClientCallMetricGuard {
2894 10404 : let calls_counter_pair = self.calls_counter_pair(file_kind, op_kind);
2895 10404 : calls_counter_pair.inc();
2896 :
2897 10404 : let bytes_finished = match size {
2898 5844 : RemoteTimelineClientMetricsCallTrackSize::DontTrackSize { reason: _reason } => {
2899 5844 : // nothing to do
2900 5844 : None
2901 : }
2902 4560 : RemoteTimelineClientMetricsCallTrackSize::Bytes(size) => {
2903 4560 : self.bytes_started_counter(file_kind, op_kind).inc_by(size);
2904 4560 : let finished_counter = self.bytes_finished_counter(file_kind, op_kind);
2905 4560 : Some((finished_counter, size))
2906 : }
2907 : };
2908 10404 : RemoteTimelineClientCallMetricGuard {
2909 10404 : calls_counter_pair: Some(calls_counter_pair),
2910 10404 : bytes_finished,
2911 10404 : }
2912 10404 : }
2913 :
2914 : /// Manually update the metrics that track completions, instead of using the guard object.
2915 : /// Using the guard object is generally preferable.
2916 : /// See [`call_begin`](Self::call_begin) for more context.
2917 8749 : pub(crate) fn call_end(
2918 8749 : &self,
2919 8749 : file_kind: &RemoteOpFileKind,
2920 8749 : op_kind: &RemoteOpKind,
2921 8749 : size: RemoteTimelineClientMetricsCallTrackSize,
2922 8749 : ) {
2923 8749 : let calls_counter_pair = self.calls_counter_pair(file_kind, op_kind);
2924 8749 : calls_counter_pair.dec();
2925 8749 : match size {
2926 4936 : RemoteTimelineClientMetricsCallTrackSize::DontTrackSize { reason: _reason } => {}
2927 3813 : RemoteTimelineClientMetricsCallTrackSize::Bytes(size) => {
2928 3813 : self.bytes_finished_counter(file_kind, op_kind).inc_by(size);
2929 3813 : }
2930 : }
2931 8749 : }
2932 : }
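// Usage sketch (illustrative only, not part of this file): the guard style versus the
// manual style described above, for a layer upload of `nbytes` bytes.
fn track_upload_calls_sketch(metrics: &RemoteTimelineClientMetrics, nbytes: u64) {
    // Guard style: completion metrics are updated when the guard is dropped.
    let _guard = metrics.call_begin(
        &RemoteOpFileKind::Layer,
        &RemoteOpKind::Upload,
        RemoteTimelineClientMetricsCallTrackSize::Bytes(nbytes),
    );

    // Manual style: opt out of the guard's updates and call call_end() later,
    // typically from the task that actually completes the upload.
    let guard = metrics.call_begin(
        &RemoteOpFileKind::Layer,
        &RemoteOpKind::Upload,
        RemoteTimelineClientMetricsCallTrackSize::Bytes(nbytes),
    );
    guard.will_decrement_manually();
    metrics.call_end(
        &RemoteOpFileKind::Layer,
        &RemoteOpKind::Upload,
        RemoteTimelineClientMetricsCallTrackSize::Bytes(nbytes),
    );
}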
2933 :
2934 : impl Drop for RemoteTimelineClientMetrics {
2935 54 : fn drop(&mut self) {
2936 54 : let RemoteTimelineClientMetrics {
2937 54 : tenant_id,
2938 54 : shard_id,
2939 54 : timeline_id,
2940 54 : remote_physical_size_gauge,
2941 54 : calls,
2942 54 : bytes_started_counter,
2943 54 : bytes_finished_counter,
2944 54 : } = self;
2945 72 : for ((a, b), _) in calls.get_mut().unwrap().drain() {
2946 72 : let mut res = [Ok(()), Ok(())];
2947 72 : REMOTE_TIMELINE_CLIENT_CALLS
2948 72 : .remove_label_values(&mut res, &[tenant_id, shard_id, timeline_id, a, b]);
2949 72 : // don't care about results
2950 72 : }
2951 54 : for ((a, b), _) in bytes_started_counter.get_mut().unwrap().drain() {
2952 18 : let _ = REMOTE_TIMELINE_CLIENT_BYTES_STARTED_COUNTER.remove_label_values(&[
2953 18 : tenant_id,
2954 18 : shard_id,
2955 18 : timeline_id,
2956 18 : a,
2957 18 : b,
2958 18 : ]);
2959 18 : }
2960 54 : for ((a, b), _) in bytes_finished_counter.get_mut().unwrap().drain() {
2961 18 : let _ = REMOTE_TIMELINE_CLIENT_BYTES_FINISHED_COUNTER.remove_label_values(&[
2962 18 : tenant_id,
2963 18 : shard_id,
2964 18 : timeline_id,
2965 18 : a,
2966 18 : b,
2967 18 : ]);
2968 18 : }
2969 54 : {
2970 54 : let _ = remote_physical_size_gauge; // use to avoid 'unused' warning in desctructuring above
2971 54 : let _ = REMOTE_PHYSICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]);
2972 54 : }
2973 54 : }
2974 : }
2975 :
2976 : /// Wrapper future that measures the time spent by a remote storage operation,
2977 : /// and records the time and success/failure as a prometheus metric.
2978 : pub(crate) trait MeasureRemoteOp: Sized {
2979 8435 : fn measure_remote_op(
2980 8435 : self,
2981 8435 : file_kind: RemoteOpFileKind,
2982 8435 : op: RemoteOpKind,
2983 8435 : metrics: Arc<RemoteTimelineClientMetrics>,
2984 8435 : ) -> MeasuredRemoteOp<Self> {
2985 8435 : let start = Instant::now();
2986 8435 : MeasuredRemoteOp {
2987 8435 : inner: self,
2988 8435 : file_kind,
2989 8435 : op,
2990 8435 : start,
2991 8435 : metrics,
2992 8435 : }
2993 8435 : }
2994 : }
2995 :
2996 : impl<T: Sized> MeasureRemoteOp for T {}
2997 :
2998 : pin_project! {
2999 : pub(crate) struct MeasuredRemoteOp<F>
3000 : {
3001 : #[pin]
3002 : inner: F,
3003 : file_kind: RemoteOpFileKind,
3004 : op: RemoteOpKind,
3005 : start: Instant,
3006 : metrics: Arc<RemoteTimelineClientMetrics>,
3007 : }
3008 : }
3009 :
3010 : impl<F: Future<Output = Result<O, E>>, O, E> Future for MeasuredRemoteOp<F> {
3011 : type Output = Result<O, E>;
3012 :
3013 127800 : fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
3014 127800 : let this = self.project();
3015 127800 : let poll_result = this.inner.poll(cx);
3016 127800 : if let Poll::Ready(ref res) = poll_result {
3017 8114 : let duration = this.start.elapsed();
3018 8114 : let status = if res.is_ok() { &"success" } else { &"failure" };
3019 8114 : this.metrics
3020 8114 : .remote_operation_time(this.file_kind, this.op, status)
3021 8114 : .observe(duration.as_secs_f64());
3022 119686 : }
3023 127800 : poll_result
3024 127800 : }
3025 : }
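// Illustrative sketch (not part of the original source): because of the
// blanket `impl<T: Sized> MeasureRemoteOp for T` above, any future can be
// wrapped. The download future and the variant names below are hypothetical:
//
//     let layer_bytes = download_layer_file(&remote_path)
//         .measure_remote_op(
//             RemoteOpFileKind::Layer,
//             RemoteOpKind::Download,
//             Arc::clone(&metrics),
//         )
//         .await?;
//
// The duration and the success/failure status are observed exactly once, on
// the poll that returns `Poll::Ready`.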
3026 :
3027 : pub mod tokio_epoll_uring {
3028 : use metrics::{register_int_counter, UIntGauge};
3029 : use once_cell::sync::Lazy;
3030 :
3031 : pub struct Collector {
3032 : descs: Vec<metrics::core::Desc>,
3033 : systems_created: UIntGauge,
3034 : systems_destroyed: UIntGauge,
3035 : }
3036 :
3037 : impl metrics::core::Collector for Collector {
3038 0 : fn desc(&self) -> Vec<&metrics::core::Desc> {
3039 0 : self.descs.iter().collect()
3040 0 : }
3041 :
3042 0 : fn collect(&self) -> Vec<metrics::proto::MetricFamily> {
3043 0 : let mut mfs = Vec::with_capacity(Self::NMETRICS);
3044 0 : let tokio_epoll_uring::metrics::Metrics {
3045 0 : systems_created,
3046 0 : systems_destroyed,
3047 0 : } = tokio_epoll_uring::metrics::global();
3048 0 : self.systems_created.set(systems_created);
3049 0 : mfs.extend(self.systems_created.collect());
3050 0 : self.systems_destroyed.set(systems_destroyed);
3051 0 : mfs.extend(self.systems_destroyed.collect());
3052 0 : mfs
3053 0 : }
3054 : }
3055 :
3056 : impl Collector {
3057 : const NMETRICS: usize = 2;
3058 :
3059 : #[allow(clippy::new_without_default)]
3060 0 : pub fn new() -> Self {
3061 0 : let mut descs = Vec::new();
3062 0 :
3063 0 : let systems_created = UIntGauge::new(
3064 0 : "pageserver_tokio_epoll_uring_systems_created",
3065 0 : "counter of tokio-epoll-uring systems that were created",
3066 0 : )
3067 0 : .unwrap();
3068 0 : descs.extend(
3069 0 : metrics::core::Collector::desc(&systems_created)
3070 0 : .into_iter()
3071 0 : .cloned(),
3072 0 : );
3073 0 :
3074 0 : let systems_destroyed = UIntGauge::new(
3075 0 : "pageserver_tokio_epoll_uring_systems_destroyed",
3076 0 : "counter of tokio-epoll-uring systems that were destroyed",
3077 0 : )
3078 0 : .unwrap();
3079 0 : descs.extend(
3080 0 : metrics::core::Collector::desc(&systems_destroyed)
3081 0 : .into_iter()
3082 0 : .cloned(),
3083 0 : );
3084 0 :
3085 0 : Self {
3086 0 : descs,
3087 0 : systems_created,
3088 0 : systems_destroyed,
3089 0 : }
3090 0 : }
3091 : }
3092 :
3093 306 : pub(crate) static THREAD_LOCAL_LAUNCH_SUCCESSES: Lazy<metrics::IntCounter> = Lazy::new(|| {
3094 306 : register_int_counter!(
3095 306 : "pageserver_tokio_epoll_uring_pageserver_thread_local_launch_success_count",
3096 306 : "Number of times where thread_local_system creation spanned multiple executor threads",
3097 306 : )
3098 306 : .unwrap()
3099 306 : });
3100 :
3101 0 : pub(crate) static THREAD_LOCAL_LAUNCH_FAILURES: Lazy<metrics::IntCounter> = Lazy::new(|| {
3102 0 : register_int_counter!(
3103 0 : "pageserver_tokio_epoll_uring_pageserver_thread_local_launch_failures_count",
3104 0 : "Number of times thread_local_system creation failed and was retried after back-off.",
3105 0 : )
3106 0 : .unwrap()
3107 0 : });
3108 : }
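// Illustrative sketch (not part of the original source): the collector above
// is pull-based, so it must be registered with the process-wide prometheus
// registry once at startup. Whether the `metrics` wrapper re-exports
// prometheus's `register` under this name is an assumption, not verified:
//
//     let collector = crate::metrics::tokio_epoll_uring::Collector::new();
//     metrics::register(Box::new(collector))
//         .expect("failed to register tokio-epoll-uring collector");
//
// Each scrape then calls `collect()`, which copies the global
// tokio-epoll-uring counters into the two gauges and emits them.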
3109 :
3110 : pub(crate) mod tenant_throttling {
3111 : use metrics::{register_int_counter_vec, IntCounter};
3112 : use once_cell::sync::Lazy;
3113 : use utils::shard::TenantShardId;
3114 :
3115 : use crate::tenant::{self, throttle::Metric};
3116 :
3117 : struct GlobalAndPerTenantIntCounter {
3118 : global: IntCounter,
3119 : per_tenant: IntCounter,
3120 : }
3121 :
3122 : impl GlobalAndPerTenantIntCounter {
3123 : #[inline(always)]
3124 0 : pub(crate) fn inc(&self) {
3125 0 : self.inc_by(1)
3126 0 : }
3127 : #[inline(always)]
3128 0 : pub(crate) fn inc_by(&self, n: u64) {
3129 0 : self.global.inc_by(n);
3130 0 : self.per_tenant.inc_by(n);
3131 0 : }
3132 : }
3133 :
3134 : pub(crate) struct TimelineGet {
3135 : count_accounted_start: GlobalAndPerTenantIntCounter,
3136 : count_accounted_finish: GlobalAndPerTenantIntCounter,
3137 : wait_time: GlobalAndPerTenantIntCounter,
3138 : count_throttled: GlobalAndPerTenantIntCounter,
3139 : }
3140 :
3141 522 : static COUNT_ACCOUNTED_START: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3142 522 : register_int_counter_vec!(
3143 522 : "pageserver_tenant_throttling_count_accounted_start_global",
3144 522 : "Count of tenant throttling starts, by kind of throttle.",
3145 522 : &["kind"]
3146 522 : )
3147 522 : .unwrap()
3148 522 : });
3149 522 : static COUNT_ACCOUNTED_START_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3150 522 : register_int_counter_vec!(
3151 522 : "pageserver_tenant_throttling_count_accounted_start",
3152 522 : "Count of tenant throttling starts, by kind of throttle.",
3153 522 : &["kind", "tenant_id", "shard_id"]
3154 522 : )
3155 522 : .unwrap()
3156 522 : });
3157 522 : static COUNT_ACCOUNTED_FINISH: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3158 522 : register_int_counter_vec!(
3159 522 : "pageserver_tenant_throttling_count_accounted_finish_global",
3160 522 : "Count of tenant throttling finishes, by kind of throttle.",
3161 522 : &["kind"]
3162 522 : )
3163 522 : .unwrap()
3164 522 : });
3165 522 : static COUNT_ACCOUNTED_FINISH_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3166 522 : register_int_counter_vec!(
3167 522 : "pageserver_tenant_throttling_count_accounted_finish",
3168 522 : "Count of tenant throttling finishes, by kind of throttle.",
3169 522 : &["kind", "tenant_id", "shard_id"]
3170 522 : )
3171 522 : .unwrap()
3172 522 : });
3173 522 : static WAIT_USECS: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3174 522 : register_int_counter_vec!(
3175 522 : "pageserver_tenant_throttling_wait_usecs_sum_global",
3176 522 : "Sum of microseconds that spent waiting throttle by kind of throttle.",
3177 522 : &["kind"]
3178 522 : )
3179 522 : .unwrap()
3180 522 : });
3181 522 : static WAIT_USECS_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3182 522 : register_int_counter_vec!(
3183 522 : "pageserver_tenant_throttling_wait_usecs_sum",
3184 522 : "Sum of microseconds that spent waiting throttle by kind of throttle.",
3185 522 : &["kind", "tenant_id", "shard_id"]
3186 522 : )
3187 522 : .unwrap()
3188 522 : });
3189 :
3190 522 : static WAIT_COUNT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3191 522 : register_int_counter_vec!(
3192 522 : "pageserver_tenant_throttling_count_global",
3193 522 : "Count of tenant throttlings, by kind of throttle.",
3194 522 : &["kind"]
3195 522 : )
3196 522 : .unwrap()
3197 522 : });
3198 522 : static WAIT_COUNT_PER_TENANT: Lazy<metrics::IntCounterVec> = Lazy::new(|| {
3199 522 : register_int_counter_vec!(
3200 522 : "pageserver_tenant_throttling_count",
3201 522 : "Count of tenant throttlings, by kind of throttle.",
3202 522 : &["kind", "tenant_id", "shard_id"]
3203 522 : )
3204 522 : .unwrap()
3205 522 : });
3206 :
3207 : const KIND: &str = "timeline_get";
3208 :
3209 : impl TimelineGet {
3210 576 : pub(crate) fn new(tenant_shard_id: &TenantShardId) -> Self {
3211 576 : TimelineGet {
3212 576 : count_accounted_start: {
3213 576 : GlobalAndPerTenantIntCounter {
3214 576 : global: COUNT_ACCOUNTED_START.with_label_values(&[KIND]),
3215 576 : per_tenant: COUNT_ACCOUNTED_START_PER_TENANT.with_label_values(&[
3216 576 : KIND,
3217 576 : &tenant_shard_id.tenant_id.to_string(),
3218 576 : &tenant_shard_id.shard_slug().to_string(),
3219 576 : ]),
3220 576 : }
3221 576 : },
3222 576 : count_accounted_finish: {
3223 576 : GlobalAndPerTenantIntCounter {
3224 576 : global: COUNT_ACCOUNTED_FINISH.with_label_values(&[KIND]),
3225 576 : per_tenant: COUNT_ACCOUNTED_FINISH_PER_TENANT.with_label_values(&[
3226 576 : KIND,
3227 576 : &tenant_shard_id.tenant_id.to_string(),
3228 576 : &tenant_shard_id.shard_slug().to_string(),
3229 576 : ]),
3230 576 : }
3231 576 : },
3232 576 : wait_time: {
3233 576 : GlobalAndPerTenantIntCounter {
3234 576 : global: WAIT_USECS.with_label_values(&[KIND]),
3235 576 : per_tenant: WAIT_USECS_PER_TENANT.with_label_values(&[
3236 576 : KIND,
3237 576 : &tenant_shard_id.tenant_id.to_string(),
3238 576 : &tenant_shard_id.shard_slug().to_string(),
3239 576 : ]),
3240 576 : }
3241 576 : },
3242 576 : count_throttled: {
3243 576 : GlobalAndPerTenantIntCounter {
3244 576 : global: WAIT_COUNT.with_label_values(&[KIND]),
3245 576 : per_tenant: WAIT_COUNT_PER_TENANT.with_label_values(&[
3246 576 : KIND,
3247 576 : &tenant_shard_id.tenant_id.to_string(),
3248 576 : &tenant_shard_id.shard_slug().to_string(),
3249 576 : ]),
3250 576 : }
3251 576 : },
3252 576 : }
3253 576 : }
3254 : }
3255 :
3256 0 : pub(crate) fn preinitialize_global_metrics() {
3257 0 : Lazy::force(&COUNT_ACCOUNTED_START);
3258 0 : Lazy::force(&COUNT_ACCOUNTED_FINISH);
3259 0 : Lazy::force(&WAIT_USECS);
3260 0 : Lazy::force(&WAIT_COUNT);
3261 0 : }
3262 :
3263 18 : pub(crate) fn remove_tenant_metrics(tenant_shard_id: &TenantShardId) {
3264 72 : for m in &[
3265 18 : &COUNT_ACCOUNTED_START_PER_TENANT,
3266 18 : &COUNT_ACCOUNTED_FINISH_PER_TENANT,
3267 18 : &WAIT_USECS_PER_TENANT,
3268 18 : &WAIT_COUNT_PER_TENANT,
3269 72 : ] {
3270 72 : let _ = m.remove_label_values(&[
3271 72 : KIND,
3272 72 : &tenant_shard_id.tenant_id.to_string(),
3273 72 : &tenant_shard_id.shard_slug().to_string(),
3274 72 : ]);
3275 72 : }
3276 18 : }
3277 :
3278 : impl Metric for TimelineGet {
3279 : #[inline(always)]
3280 0 : fn accounting_start(&self) {
3281 0 : self.count_accounted_start.inc();
3282 0 : }
3283 : #[inline(always)]
3284 0 : fn accounting_finish(&self) {
3285 0 : self.count_accounted_finish.inc();
3286 0 : }
3287 : #[inline(always)]
3288 0 : fn observe_throttling(
3289 0 : &self,
3290 0 : tenant::throttle::Observation { wait_time }: &tenant::throttle::Observation,
3291 0 : ) {
3292 0 : let val = u64::try_from(wait_time.as_micros()).unwrap();
3293 0 : self.wait_time.inc_by(val);
3294 0 : self.count_throttled.inc();
3295 0 : }
3296 : }
3297 : }
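// Illustrative sketch (not part of the original source): the throttle drives
// `TimelineGet` through the `Metric` trait, roughly as below. The
// `throttle.acquire()` call paraphrases the throttle internals and is not a
// quote of the real API:
//
//     let metric = tenant_throttling::TimelineGet::new(&tenant_shard_id);
//     metric.accounting_start();
//     if let Some(wait_time) = throttle.acquire().await {
//         metric.observe_throttling(&tenant::throttle::Observation { wait_time });
//     }
//     metric.accounting_finish();
//
// Every call updates both the global ("..._global") series and the
// per-tenant series, which is what `GlobalAndPerTenantIntCounter` is for.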
3298 :
3299 : pub(crate) mod disk_usage_based_eviction {
3300 : use super::*;
3301 :
3302 : pub(crate) struct Metrics {
3303 : pub(crate) tenant_collection_time: Histogram,
3304 : pub(crate) tenant_layer_count: Histogram,
3305 : pub(crate) layers_collected: IntCounter,
3306 : pub(crate) layers_selected: IntCounter,
3307 : pub(crate) layers_evicted: IntCounter,
3308 : }
3309 :
3310 : impl Default for Metrics {
3311 0 : fn default() -> Self {
3312 0 : let tenant_collection_time = register_histogram!(
3313 0 : "pageserver_disk_usage_based_eviction_tenant_collection_seconds",
3314 0 : "Time spent collecting layers from a tenant -- not normalized by collected layer amount",
3315 0 : vec![0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1.0, 5.0, 10.0]
3316 0 : )
3317 0 : .unwrap();
3318 0 :
3319 0 : let tenant_layer_count = register_histogram!(
3320 0 : "pageserver_disk_usage_based_eviction_tenant_collected_layers",
3321 0 : "Amount of layers gathered from a tenant",
3322 0 : vec![5.0, 50.0, 500.0, 5000.0, 50000.0]
3323 0 : )
3324 0 : .unwrap();
3325 0 :
3326 0 : let layers_collected = register_int_counter!(
3327 0 : "pageserver_disk_usage_based_eviction_collected_layers_total",
3328 0 : "Amount of layers collected"
3329 0 : )
3330 0 : .unwrap();
3331 0 :
3332 0 : let layers_selected = register_int_counter!(
3333 0 : "pageserver_disk_usage_based_eviction_select_layers_total",
3334 0 : "Amount of layers selected"
3335 0 : )
3336 0 : .unwrap();
3337 0 :
3338 0 : let layers_evicted = register_int_counter!(
3339 0 : "pageserver_disk_usage_based_eviction_evicted_layers_total",
3340 0 : "Amount of layers successfully evicted"
3341 0 : )
3342 0 : .unwrap();
3343 0 :
3344 0 : Self {
3345 0 : tenant_collection_time,
3346 0 : tenant_layer_count,
3347 0 : layers_collected,
3348 0 : layers_selected,
3349 0 : layers_evicted,
3350 0 : }
3351 0 : }
3352 : }
3353 :
3354 : pub(crate) static METRICS: Lazy<Metrics> = Lazy::new(Metrics::default);
3355 : }
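// Illustrative sketch (not part of the original source): eviction code records
// into the lazily-initialized `METRICS` singleton; the `collected` variable is
// hypothetical:
//
//     use crate::metrics::disk_usage_based_eviction::METRICS;
//
//     let _timer = METRICS.tenant_collection_time.start_timer();
//     // ... gather layers for one tenant ...
//     METRICS.tenant_layer_count.observe(collected.len() as f64);
//     METRICS.layers_collected.inc_by(collected.len() as u64);
//
// `Histogram::start_timer()` observes the elapsed seconds when the returned
// guard is dropped.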
3356 :
3357 504 : static TOKIO_EXECUTOR_THREAD_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| {
3358 504 : register_uint_gauge_vec!(
3359 504 : "pageserver_tokio_executor_thread_configured_count",
3360 504 : "Total number of configued tokio executor threads in the process.
3361 504 : The `setup` label denotes whether we're running with multiple runtimes or a single runtime.",
3362 504 : &["setup"],
3363 504 : )
3364 504 : .unwrap()
3365 504 : });
3366 :
3367 504 : pub(crate) fn set_tokio_runtime_setup(setup: &str, num_threads: NonZeroUsize) {
3368 : static SERIALIZE: std::sync::Mutex<()> = std::sync::Mutex::new(());
3369 504 : let _guard = SERIALIZE.lock().unwrap();
3370 504 : TOKIO_EXECUTOR_THREAD_COUNT.reset();
3371 504 : TOKIO_EXECUTOR_THREAD_COUNT
3372 504 : .get_metric_with_label_values(&[setup])
3373 504 : .unwrap()
3374 504 : .set(u64::try_from(num_threads.get()).unwrap());
3375 504 : }
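// Illustrative sketch (not part of the original source): a hypothetical call
// site during startup, after the runtime(s) are built; the label value is an
// assumption:
//
//     set_tokio_runtime_setup(
//         "multiple-runtimes",
//         NonZeroUsize::new(worker_threads).expect("thread count must be non-zero"),
//     );
//
// The `reset()` above keeps at most one `setup` label value alive, so calling
// this again (e.g. from tests) replaces the series instead of adding a second
// one.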
3376 :
3377 0 : pub fn preinitialize_metrics() {
3378 0 : // Python tests need these, and we alert on some of them.
3379 0 : //
3380 0 : // FIXME(4813): make it so that we have no top level metrics as this fn will easily fall out of
3381 0 : // order:
3382 0 : // - global metrics reside in a Lazy<PageserverMetrics>
3383 0 : // - access via crate::metrics::PS_METRICS.some_metric.inc()
3384 0 : // - could move the statics into TimelineMetrics::new()?
3385 0 :
3386 0 : // counters
3387 0 : [
3388 0 : &UNEXPECTED_ONDEMAND_DOWNLOADS,
3389 0 : &WALRECEIVER_STARTED_CONNECTIONS,
3390 0 : &WALRECEIVER_BROKER_UPDATES,
3391 0 : &WALRECEIVER_CANDIDATES_ADDED,
3392 0 : &WALRECEIVER_CANDIDATES_REMOVED,
3393 0 : &tokio_epoll_uring::THREAD_LOCAL_LAUNCH_FAILURES,
3394 0 : &tokio_epoll_uring::THREAD_LOCAL_LAUNCH_SUCCESSES,
3395 0 : &REMOTE_ONDEMAND_DOWNLOADED_LAYERS,
3396 0 : &REMOTE_ONDEMAND_DOWNLOADED_BYTES,
3397 0 : &CIRCUIT_BREAKERS_BROKEN,
3398 0 : &CIRCUIT_BREAKERS_UNBROKEN,
3399 0 : ]
3400 0 : .into_iter()
3401 0 : .for_each(|c| {
3402 0 : Lazy::force(c);
3403 0 : });
3404 0 :
3405 0 : // Deletion queue stats
3406 0 : Lazy::force(&DELETION_QUEUE);
3407 0 :
3408 0 : // Tenant stats
3409 0 : Lazy::force(&TENANT);
3410 0 :
3411 0 : // Tenant manager stats
3412 0 : Lazy::force(&TENANT_MANAGER);
3413 0 :
3414 0 : Lazy::force(&crate::tenant::storage_layer::layer::LAYER_IMPL_METRICS);
3415 0 : Lazy::force(&disk_usage_based_eviction::METRICS);
3416 :
3417 0 : for state_name in pageserver_api::models::TenantState::VARIANTS {
3418 0 : // initialize the gauge for all tenant states, otherwise the time series might appear
3419 0 : // to carry over values from the last restart.
3420 0 : TENANT_STATE_METRIC.with_label_values(&[state_name]).set(0);
3421 0 : }
3422 :
3423 : // countervecs
3424 0 : [
3425 0 : &BACKGROUND_LOOP_PERIOD_OVERRUN_COUNT,
3426 0 : &SMGR_QUERY_STARTED_GLOBAL,
3427 0 : ]
3428 0 : .into_iter()
3429 0 : .for_each(|c| {
3430 0 : Lazy::force(c);
3431 0 : });
3432 0 :
3433 0 : // gauges
3434 0 : WALRECEIVER_ACTIVE_MANAGERS.get();
3435 0 :
3436 0 : // histograms
3437 0 : [
3438 0 : &READ_NUM_LAYERS_VISITED,
3439 0 : &VEC_READ_NUM_LAYERS_VISITED,
3440 0 : &WAIT_LSN_TIME,
3441 0 : &WAL_REDO_TIME,
3442 0 : &WAL_REDO_RECORDS_HISTOGRAM,
3443 0 : &WAL_REDO_BYTES_HISTOGRAM,
3444 0 : &WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM,
3445 0 : ]
3446 0 : .into_iter()
3447 0 : .for_each(|h| {
3448 0 : Lazy::force(h);
3449 0 : });
3450 0 :
3451 0 : // Custom
3452 0 : Lazy::force(&RECONSTRUCT_TIME);
3453 0 : Lazy::force(&BASEBACKUP_QUERY_TIME);
3454 0 : Lazy::force(&COMPUTE_COMMANDS_COUNTERS);
3455 0 :
3456 0 : tenant_throttling::preinitialize_global_metrics();
3457 0 : }
|