Line data Source code
1 : //! This module implements the pageserver-global disk-usage-based layer eviction task.
2 : //!
3 : //! # Mechanics
4 : //!
5 : //! Function `launch_disk_usage_global_eviction_task` starts a pageserver-global background
6 : //! loop that evicts layers in response to a shortage of available bytes
7 : //! in the $repo/tenants directory's filesystem.
8 : //!
9 : //! The loop runs periodically at a configurable `period`.
10 : //!
11 : //! Each loop iteration uses `statvfs` to determine filesystem-level space usage.
12 : //! It compares the returned usage data against two different types of thresholds.
13 : //! The iteration tries to evict layers until app-internal accounting says we should be below the thresholds.
14 : //! We cross-check this internal accounting with the real world by making another `statvfs` at the end of the iteration.
15 : //! We're good if that second statvfs shows that we're _actually_ below the configured thresholds.
16 : //! If we're still above one or more thresholds, we emit a warning log message, leaving it to the operator to investigate further.
17 : //!
18 : //! # Eviction Policy
19 : //!
20 : //! There are two thresholds:
21 : //! `max_usage_pct` is the maximum allowed usage, expressed in percent of the total filesystem space.
22 : //! If the actual usage is higher, the threshold is exceeded.
23 : //! `min_avail_bytes` is the minimum required available space, in bytes.
24 : //! If the actual available space is lower, the threshold is exceeded.
25 : //! If either of these thresholds is exceeded, the system is considered to have "disk pressure", and eviction
26 : //! is performed on the next iteration, to release disk space and bring the usage below the thresholds again.
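: //!
: //! As a minimal sketch (assuming the same field names as `filesystem_level_usage::Usage`
: //! defined later in this file; illustrative only, not the actual implementation), the
: //! per-iteration pressure check boils down to:
: //!
: //! ```ignore
: //! let usage_pct = (100.0 * (1.0 - (avail_bytes as f64 / total_bytes as f64))) as u64;
: //! let has_pressure = avail_bytes < min_avail_bytes || usage_pct >= max_usage_pct;
: //! ```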
27 : //! The iteration evicts layers in LRU fashion, but with a weak reservation per tenant.
28 : //! The reservation is to keep the most recently accessed X bytes per tenant resident.
29 : //! If we cannot relieve pressure by evicting layers outside of the reservation, we
30 : //! start evicting layers that are part of the reservation, LRU first.
31 : //!
32 : //! The value for the per-tenant reservation is referred to as `tenant_min_resident_size`
33 : //! throughout the code, but no actual variable carries that name.
34 : //! The per-tenant default is the maximum of the tenant's layer file sizes, counting both local and remote layers.
35 : //! The idea is to allow at least one layer to be resident per tenant, to ensure it can make forward progress
36 : //! during page reconstruction.
37 : //! An alternative default for all tenants can be specified in the `tenant_config` section of the config.
38 : //! Lastly, each tenant can have an override in their respective tenant config (`min_resident_size_override`).
39 :
40 : // Implementation notes:
41 : // - The `#[allow(dead_code)]` attributes above various structs suppress warnings about only the Debug impl
42 : // reading these fields. We use the Debug impl for semi-structured logging, though.
43 :
44 : use std::{sync::Arc, time::SystemTime};
45 :
46 : use anyhow::Context;
47 : use pageserver_api::{config::DiskUsageEvictionTaskConfig, shard::TenantShardId};
48 : use remote_storage::GenericRemoteStorage;
49 : use serde::Serialize;
50 : use tokio::time::Instant;
51 : use tokio_util::sync::CancellationToken;
52 : use tracing::{debug, error, info, instrument, warn, Instrument};
53 : use utils::{completion, id::TimelineId};
54 :
55 : use crate::{
56 : config::PageServerConf,
57 : metrics::disk_usage_based_eviction::METRICS,
58 : task_mgr::{self, BACKGROUND_RUNTIME},
59 : tenant::{
60 : mgr::TenantManager,
61 : remote_timeline_client::LayerFileMetadata,
62 : secondary::SecondaryTenant,
63 : storage_layer::{AsLayerDesc, EvictionError, Layer, LayerName, LayerVisibilityHint},
64 : },
65 : CancellableTask, DiskUsageEvictionTask,
66 : };
67 :
68 : /// Selects the sort order for eviction candidates *after* per tenant `min_resident_size`
69 : /// partitioning.
70 : #[derive(Debug, Clone, Copy, PartialEq, Eq)]
71 : pub enum EvictionOrder {
72 : /// Order the layers to be evicted by how recently they have been accessed relatively within
73 : /// the set of resident layers of a tenant.
74 : RelativeAccessed {
75 : /// Determines if the tenant with most layers should lose first.
76 : ///
77 : /// Having this enabled is currently the only reasonable option, because the order in which
78 : /// we read tenants is deterministic. If we find the need to use this as `false`, we need
79 : /// to ensure nondeterminism by adding in a random number to break the
80 : /// `relative_last_activity==0.0` ties.
81 : highest_layer_count_loses_first: bool,
82 : },
83 : }
84 :
85 : impl From<pageserver_api::config::EvictionOrder> for EvictionOrder {
86 0 : fn from(value: pageserver_api::config::EvictionOrder) -> Self {
87 0 : match value {
88 0 : pageserver_api::config::EvictionOrder::RelativeAccessed {
89 0 : highest_layer_count_loses_first,
90 0 : } => Self::RelativeAccessed {
91 0 : highest_layer_count_loses_first,
92 0 : },
93 0 : }
94 0 : }
95 : }
96 :
97 : impl EvictionOrder {
98 0 : fn sort(&self, candidates: &mut [(EvictionPartition, EvictionCandidate)]) {
99 : use EvictionOrder::*;
100 :
101 0 : match self {
102 0 : RelativeAccessed { .. } => candidates.sort_unstable_by_key(|(partition, candidate)| {
103 0 : (*partition, candidate.relative_last_activity)
104 0 : }),
105 0 : }
106 0 : }
107 :
108 : /// Called to fill in the [`EvictionCandidate::relative_last_activity`] while iterating a tenant's
109 : /// layers in **most** recently used order.
110 40 : fn relative_last_activity(&self, total: usize, index: usize) -> finite_f32::FiniteF32 {
111 : use EvictionOrder::*;
112 :
113 40 : match self {
114 40 : RelativeAccessed {
115 40 : highest_layer_count_loses_first,
116 : } => {
117 : // whether we keep the -1 (the fudge below) decides if every tenant should lose its least
118 : // recently accessed layer, or if losses should happen in order of highest layer count first:
119 40 : let fudge = if *highest_layer_count_loses_first {
120 : // relative_last_activity vs. tenant layer count:
121 : // - 0.1..=1.0 (10 layers)
122 : // - 0.01..=1.0 (100 layers)
123 : // - 0.001..=1.0 (1000 layers)
124 : //
125 : // leading to evicting less of the smallest tenants.
126 20 : 0
127 : } else {
128 : // use full 0.0..=1.0 range, which means even the smallest tenants could always lose a
129 : // layer. the actual ordering is unspecified: for 10k tenants on a pageserver it could
130 : // be that fewer than 10k layer evictions are enough, so we would not need to evict from
131 : // all tenants.
132 : //
133 : // as the tenant ordering is now deterministic, this could hit the same tenants
134 : // disproportionately on multiple invocations. an alternative would be to remember how many
135 : // layers we evicted last time from this tenant, and inject that as an additional
136 : // fudge here.
137 20 : 1
138 : };
139 :
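: // Illustrative outputs, matching the `relative_equal_bounds` and `relative_spare_bounds`
: // unit tests at the bottom of this file:
: // total=10, highest_layer_count_loses_first=true  => 1.0, 0.9, ..., 0.2, 0.1
: // total=10, highest_layer_count_loses_first=false => 1.0, 8/9, ..., 1/9, 0.0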
140 40 : let total = total.checked_sub(fudge).filter(|&x| x > 1).unwrap_or(1);
141 40 : let divider = total as f32;
142 40 :
143 40 : // most recently used is always (total - 0) / divider == 1.0
144 40 : // least recently used depends on the fudge:
145 40 : // - (total - 1) - (total - 1) / total => 0 / total
146 40 : // - total - (total - 1) / total => 1 / total
147 40 : let distance = (total - index) as f32;
148 40 :
149 40 : finite_f32::FiniteF32::try_from_normalized(distance / divider)
150 40 : .unwrap_or_else(|val| {
151 0 : tracing::warn!(%fudge, "calculated invalid relative_last_activity for i={index}, total={total}: {val}");
152 0 : finite_f32::FiniteF32::ZERO
153 40 : })
154 40 : }
155 40 : }
156 40 : }
157 : }
158 :
159 : #[derive(Default)]
160 : pub struct State {
161 : /// Excludes HTTP requests and the background task from running at the same time.
162 : mutex: tokio::sync::Mutex<()>,
163 : }
164 :
165 0 : pub fn launch_disk_usage_global_eviction_task(
166 0 : conf: &'static PageServerConf,
167 0 : storage: GenericRemoteStorage,
168 0 : state: Arc<State>,
169 0 : tenant_manager: Arc<TenantManager>,
170 0 : background_jobs_barrier: completion::Barrier,
171 0 : ) -> Option<DiskUsageEvictionTask> {
172 0 : let Some(task_config) = &conf.disk_usage_based_eviction else {
173 0 : info!("disk usage based eviction task not configured");
174 0 : return None;
175 : };
176 :
177 0 : info!("launching disk usage based eviction task");
178 :
179 0 : let cancel = CancellationToken::new();
180 0 : let task = BACKGROUND_RUNTIME.spawn(task_mgr::exit_on_panic_or_error(
181 0 : "disk usage based eviction",
182 0 : {
183 0 : let cancel = cancel.clone();
184 0 : async move {
185 0 : // wait until initial load is complete, because we cannot evict from loading tenants.
186 0 : tokio::select! {
187 0 : _ = cancel.cancelled() => { return anyhow::Ok(()); },
188 0 : _ = background_jobs_barrier.wait() => { }
189 0 : };
190 0 :
191 0 : disk_usage_eviction_task(&state, task_config, &storage, tenant_manager, cancel)
192 0 : .await;
193 0 : anyhow::Ok(())
194 0 : }
195 0 : },
196 0 : ));
197 0 :
198 0 : Some(DiskUsageEvictionTask(CancellableTask { cancel, task }))
199 0 : }
200 :
201 0 : #[instrument(skip_all)]
202 : async fn disk_usage_eviction_task(
203 : state: &State,
204 : task_config: &DiskUsageEvictionTaskConfig,
205 : storage: &GenericRemoteStorage,
206 : tenant_manager: Arc<TenantManager>,
207 : cancel: CancellationToken,
208 : ) {
209 : scopeguard::defer! {
210 : info!("disk usage based eviction task finishing");
211 : };
212 :
213 : use crate::tenant::tasks::random_init_delay;
214 : {
215 : if random_init_delay(task_config.period, &cancel)
216 : .await
217 : .is_err()
218 : {
219 : return;
220 : }
221 : }
222 :
223 : let mut iteration_no = 0;
224 : loop {
225 : iteration_no += 1;
226 : let start = Instant::now();
227 :
228 0 : async {
229 0 : let res = disk_usage_eviction_task_iteration(
230 0 : state,
231 0 : task_config,
232 0 : storage,
233 0 : &tenant_manager,
234 0 : &cancel,
235 0 : )
236 0 : .await;
237 :
238 0 : match res {
239 0 : Ok(()) => {}
240 0 : Err(e) => {
241 0 : // these stat failures are expected to be very rare
242 0 : warn!("iteration failed, unexpected error: {e:#}");
243 : }
244 : }
245 0 : }
246 : .instrument(tracing::info_span!("iteration", iteration_no))
247 : .await;
248 :
249 : let sleep_until = start + task_config.period;
250 : if tokio::time::timeout_at(sleep_until, cancel.cancelled())
251 : .await
252 : .is_ok()
253 : {
254 : break;
255 : }
256 : }
257 : }
258 :
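: /// Abstraction over disk-usage accounting, so the eviction planning logic can be exercised
: /// without a real filesystem. A minimal sketch of an implementation (hypothetical, for
: /// illustration only; the real filesystem-backed one lives in `filesystem_level_usage` below):
: ///
: /// ```ignore
: /// #[derive(Debug, Clone, Copy)]
: /// struct FixedThresholdUsage {
: ///     avail_bytes: u64,
: ///     min_avail_bytes: u64,
: /// }
: ///
: /// impl Usage for FixedThresholdUsage {
: ///     fn has_pressure(&self) -> bool {
: ///         self.avail_bytes < self.min_avail_bytes
: ///     }
: ///     fn add_available_bytes(&mut self, bytes: u64) {
: ///         self.avail_bytes += bytes;
: ///     }
: /// }
: /// ```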
259 : pub trait Usage: Clone + Copy + std::fmt::Debug {
260 : fn has_pressure(&self) -> bool;
261 : fn add_available_bytes(&mut self, bytes: u64);
262 : }
263 :
264 0 : async fn disk_usage_eviction_task_iteration(
265 0 : state: &State,
266 0 : task_config: &DiskUsageEvictionTaskConfig,
267 0 : storage: &GenericRemoteStorage,
268 0 : tenant_manager: &Arc<TenantManager>,
269 0 : cancel: &CancellationToken,
270 0 : ) -> anyhow::Result<()> {
271 0 : let tenants_dir = tenant_manager.get_conf().tenants_path();
272 0 : let usage_pre = filesystem_level_usage::get(&tenants_dir, task_config)
273 0 : .context("get filesystem-level disk usage before evictions")?;
274 0 : let res = disk_usage_eviction_task_iteration_impl(
275 0 : state,
276 0 : storage,
277 0 : usage_pre,
278 0 : tenant_manager,
279 0 : task_config.eviction_order.into(),
280 0 : cancel,
281 0 : )
282 0 : .await;
283 0 : match res {
284 0 : Ok(outcome) => {
285 0 : debug!(?outcome, "disk_usage_eviction_iteration finished");
286 0 : match outcome {
287 0 : IterationOutcome::NoPressure | IterationOutcome::Cancelled => {
288 0 : // nothing to do, select statement below will handle things
289 0 : }
290 0 : IterationOutcome::Finished(outcome) => {
291 : // Verify with statvfs whether we made any real progress
292 0 : let after = filesystem_level_usage::get(&tenants_dir, task_config)
293 0 : // It's quite unlikely to hit the error here. Keep the code simple and bail out.
294 0 : .context("get filesystem-level disk usage after evictions")?;
295 :
296 0 : debug!(?after, "disk usage");
297 :
298 0 : if after.has_pressure() {
299 : // Don't bother doing an out-of-order iteration here now.
300 : // In practice, the task period is set to a value in the tens-of-seconds range,
301 : // which will cause another iteration to happen soon enough.
302 : // TODO: deltas between the three different usages would be helpful,
303 : // consider MiB, GiB, TiB
304 0 : warn!(?outcome, ?after, "disk usage still high");
305 : } else {
306 0 : info!(?outcome, ?after, "disk usage pressure relieved");
307 : }
308 : }
309 : }
310 : }
311 0 : Err(e) => {
312 0 : error!("disk_usage_eviction_iteration failed: {:#}", e);
313 : }
314 : }
315 :
316 0 : Ok(())
317 0 : }
318 :
319 : #[derive(Debug, Serialize)]
320 : #[allow(clippy::large_enum_variant)]
321 : pub enum IterationOutcome<U> {
322 : NoPressure,
323 : Cancelled,
324 : Finished(IterationOutcomeFinished<U>),
325 : }
326 :
327 : #[derive(Debug, Serialize)]
328 : pub struct IterationOutcomeFinished<U> {
329 : /// The actual usage observed before we started the iteration.
330 : before: U,
331 : /// The expected value for `after`, according to internal accounting, after phase 1.
332 : planned: PlannedUsage<U>,
333 : /// The outcome of phase 2, where we actually do the evictions.
334 : ///
335 : /// If all layers that phase 1 planned to evict _can_ actually get evicted, this will
336 : /// be the same as `planned`.
337 : assumed: AssumedUsage<U>,
338 : }
339 :
340 : #[derive(Debug, Serialize)]
341 : struct AssumedUsage<U> {
342 : /// The expected value for `after`, after phase 2.
343 : projected_after: U,
344 : /// The layers we failed to evict during phase 2.
345 : failed: LayerCount,
346 : }
347 :
348 : #[derive(Debug, Serialize)]
349 : struct PlannedUsage<U> {
350 : respecting_tenant_min_resident_size: U,
351 : fallback_to_global_lru: Option<U>,
352 : }
353 :
354 : #[derive(Debug, Default, Serialize)]
355 : struct LayerCount {
356 : file_sizes: u64,
357 : count: usize,
358 : }
359 :
360 0 : pub(crate) async fn disk_usage_eviction_task_iteration_impl<U: Usage>(
361 0 : state: &State,
362 0 : _storage: &GenericRemoteStorage,
363 0 : usage_pre: U,
364 0 : tenant_manager: &Arc<TenantManager>,
365 0 : eviction_order: EvictionOrder,
366 0 : cancel: &CancellationToken,
367 0 : ) -> anyhow::Result<IterationOutcome<U>> {
368 : // use tokio's mutex to get a Sync guard (instead of std::sync::Mutex)
369 0 : let _g = state
370 0 : .mutex
371 0 : .try_lock()
372 0 : .map_err(|_| anyhow::anyhow!("iteration is already executing"))?;
373 :
374 0 : debug!(?usage_pre, "disk usage");
375 :
376 0 : if !usage_pre.has_pressure() {
377 0 : return Ok(IterationOutcome::NoPressure);
378 0 : }
379 0 :
380 0 : warn!(
381 : ?usage_pre,
382 0 : "running disk usage based eviction due to pressure"
383 : );
384 :
385 0 : let (candidates, collection_time) = {
386 0 : let started_at = std::time::Instant::now();
387 0 : match collect_eviction_candidates(tenant_manager, eviction_order, cancel).await? {
388 : EvictionCandidates::Cancelled => {
389 0 : return Ok(IterationOutcome::Cancelled);
390 : }
391 0 : EvictionCandidates::Finished(partitioned) => (partitioned, started_at.elapsed()),
392 0 : }
393 0 : };
394 0 :
395 0 : METRICS.layers_collected.inc_by(candidates.len() as u64);
396 0 :
397 0 : tracing::info!(
398 0 : elapsed_ms = collection_time.as_millis(),
399 0 : total_layers = candidates.len(),
400 0 : "collection completed"
401 : );
402 :
403 : // Debug-log the list of candidates
404 0 : let now = SystemTime::now();
405 0 : for (i, (partition, candidate)) in candidates.iter().enumerate() {
406 0 : let nth = i + 1;
407 0 : let total_candidates = candidates.len();
408 0 : let size = candidate.layer.get_file_size();
409 0 : let rel = candidate.relative_last_activity;
410 0 : debug!(
411 0 : "cand {nth}/{total_candidates}: size={size}, rel_last_activity={rel}, no_access_for={}us, partition={partition:?}, {}/{}/{}",
412 0 : now.duration_since(candidate.last_activity_ts)
413 0 : .unwrap()
414 0 : .as_micros(),
415 0 : candidate.layer.get_tenant_shard_id(),
416 0 : candidate.layer.get_timeline_id(),
417 0 : candidate.layer.get_name(),
418 : );
419 : }
420 :
421 : // phase1: select victims to relieve pressure
422 : //
423 : // Walk through the list of candidates, until we have accumulated enough layers to get
424 : // us back under the pressure threshold. 'usage_planned' is updated so that it tracks
425 : // how much disk space would be used after evicting all the layers up to the current
426 : // point in the list.
427 : //
428 : // If we get far enough in the list that we start to evict layers that are below
429 : // the tenant's min-resident-size threshold, print a warning, and memorize the disk
430 : // usage at that point (recorded as `respecting_tenant_min_resident_size` in the planned usage).
431 :
432 0 : let (evicted_amount, usage_planned) =
433 0 : select_victims(&candidates, usage_pre).into_amount_and_planned();
434 0 :
435 0 : METRICS.layers_selected.inc_by(evicted_amount as u64);
436 0 :
437 0 : // phase2: evict layers
438 0 :
439 0 : let mut js = tokio::task::JoinSet::new();
440 0 : let limit = 1000;
441 0 :
442 0 : let mut evicted = candidates.into_iter().take(evicted_amount).fuse();
443 0 : let mut consumed_all = false;
444 0 :
445 0 : // After the evictions, `usage_assumed` is the post-eviction usage,
446 0 : // according to internal accounting.
447 0 : let mut usage_assumed = usage_pre;
448 0 : let mut evictions_failed = LayerCount::default();
449 0 :
450 0 : let evict_layers = async move {
451 : loop {
452 0 : let next = if js.len() >= limit || consumed_all {
453 0 : js.join_next().await
454 0 : } else if !js.is_empty() {
455 : // opportunistically consume one ready result per newly started eviction
456 0 : futures::future::FutureExt::now_or_never(js.join_next()).and_then(|x| x)
457 : } else {
458 0 : None
459 : };
460 :
461 0 : if let Some(next) = next {
462 0 : match next {
463 0 : Ok(Ok(file_size)) => {
464 0 : METRICS.layers_evicted.inc();
465 0 : usage_assumed.add_available_bytes(file_size);
466 0 : }
467 : Ok(Err((
468 0 : file_size,
469 0 : EvictionError::NotFound
470 0 : | EvictionError::Downloaded
471 0 : | EvictionError::Timeout,
472 0 : ))) => {
473 0 : evictions_failed.file_sizes += file_size;
474 0 : evictions_failed.count += 1;
475 0 : }
476 0 : Err(je) if je.is_cancelled() => unreachable!("not used"),
477 0 : Err(je) if je.is_panic() => { /* already logged */ }
478 0 : Err(je) => tracing::error!("unknown JoinError: {je:?}"),
479 : }
480 0 : }
481 :
482 0 : if consumed_all && js.is_empty() {
483 0 : break;
484 0 : }
485 :
486 : // calling again when consumed_all is fine as evicted is fused.
487 0 : let Some((_partition, candidate)) = evicted.next() else {
488 0 : if !consumed_all {
489 0 : tracing::info!("all evictions started, waiting");
490 0 : consumed_all = true;
491 0 : }
492 0 : continue;
493 : };
494 :
495 0 : match candidate.layer {
496 0 : EvictionLayer::Attached(layer) => {
497 0 : let file_size = layer.layer_desc().file_size;
498 0 : js.spawn(async move {
499 0 : // have a low eviction waiting timeout because our LRU calculations go stale fast;
500 0 : // also individual layer evictions could hang because of bugs and we do not want to
501 0 : // pause disk_usage_based_eviction for such.
502 0 : let timeout = std::time::Duration::from_secs(5);
503 0 :
504 0 : match layer.evict_and_wait(timeout).await {
505 0 : Ok(()) => Ok(file_size),
506 0 : Err(e) => Err((file_size, e)),
507 : }
508 0 : });
509 0 : }
510 0 : EvictionLayer::Secondary(layer) => {
511 0 : let file_size = layer.metadata.file_size;
512 0 :
513 0 : js.spawn(async move {
514 0 : layer
515 0 : .secondary_tenant
516 0 : .evict_layer(layer.timeline_id, layer.name)
517 0 : .await;
518 0 : Ok(file_size)
519 0 : });
520 0 : }
521 : }
522 0 : tokio::task::yield_now().await;
523 : }
524 :
525 0 : (usage_assumed, evictions_failed)
526 0 : };
527 :
528 0 : let started_at = std::time::Instant::now();
529 0 :
530 0 : let evict_layers = async move {
531 0 : let mut evict_layers = std::pin::pin!(evict_layers);
532 0 :
533 0 : let maximum_expected = std::time::Duration::from_secs(10);
534 :
535 0 : let res = tokio::time::timeout(maximum_expected, &mut evict_layers).await;
536 0 : let tuple = if let Ok(tuple) = res {
537 0 : tuple
538 : } else {
539 0 : let elapsed = started_at.elapsed();
540 0 : tracing::info!(elapsed_ms = elapsed.as_millis(), "still ongoing");
541 0 : evict_layers.await
542 : };
543 :
544 0 : let elapsed = started_at.elapsed();
545 0 : tracing::info!(elapsed_ms = elapsed.as_millis(), "completed");
546 0 : tuple
547 0 : };
548 :
549 0 : let evict_layers =
550 0 : evict_layers.instrument(tracing::info_span!("evict_layers", layers=%evicted_amount));
551 :
552 0 : let (usage_assumed, evictions_failed) = tokio::select! {
553 0 : tuple = evict_layers => { tuple },
554 0 : _ = cancel.cancelled() => {
555 : // dropping joinset will abort all pending evict_and_waits and that is fine, our
556 : // requests will still stand
557 0 : return Ok(IterationOutcome::Cancelled);
558 : }
559 : };
560 :
561 0 : Ok(IterationOutcome::Finished(IterationOutcomeFinished {
562 0 : before: usage_pre,
563 0 : planned: usage_planned,
564 0 : assumed: AssumedUsage {
565 0 : projected_after: usage_assumed,
566 0 : failed: evictions_failed,
567 0 : },
568 0 : }))
569 0 : }
570 :
571 : #[derive(Clone)]
572 : pub(crate) struct EvictionSecondaryLayer {
573 : pub(crate) secondary_tenant: Arc<SecondaryTenant>,
574 : pub(crate) timeline_id: TimelineId,
575 : pub(crate) name: LayerName,
576 : pub(crate) metadata: LayerFileMetadata,
577 : }
578 :
579 : /// Full [`Layer`] objects are specific to tenants in attached mode. This type is a layer
580 : /// of indirection to store either a `Layer`, or a reference to a secondary tenant and a layer name.
581 : #[derive(Clone)]
582 : pub(crate) enum EvictionLayer {
583 : Attached(Layer),
584 : Secondary(EvictionSecondaryLayer),
585 : }
586 :
587 : impl From<Layer> for EvictionLayer {
588 0 : fn from(value: Layer) -> Self {
589 0 : Self::Attached(value)
590 0 : }
591 : }
592 :
593 : impl EvictionLayer {
594 0 : pub(crate) fn get_tenant_shard_id(&self) -> &TenantShardId {
595 0 : match self {
596 0 : Self::Attached(l) => &l.layer_desc().tenant_shard_id,
597 0 : Self::Secondary(sl) => sl.secondary_tenant.get_tenant_shard_id(),
598 : }
599 0 : }
600 :
601 0 : pub(crate) fn get_timeline_id(&self) -> &TimelineId {
602 0 : match self {
603 0 : Self::Attached(l) => &l.layer_desc().timeline_id,
604 0 : Self::Secondary(sl) => &sl.timeline_id,
605 : }
606 0 : }
607 :
608 0 : pub(crate) fn get_name(&self) -> LayerName {
609 0 : match self {
610 0 : Self::Attached(l) => l.layer_desc().layer_name(),
611 0 : Self::Secondary(sl) => sl.name.clone(),
612 : }
613 0 : }
614 :
615 0 : pub(crate) fn get_file_size(&self) -> u64 {
616 0 : match self {
617 0 : Self::Attached(l) => l.layer_desc().file_size,
618 0 : Self::Secondary(sl) => sl.metadata.file_size,
619 : }
620 0 : }
621 : }
622 :
623 : #[derive(Clone)]
624 : pub(crate) struct EvictionCandidate {
625 : pub(crate) layer: EvictionLayer,
626 : pub(crate) last_activity_ts: SystemTime,
627 : pub(crate) relative_last_activity: finite_f32::FiniteF32,
628 : pub(crate) visibility: LayerVisibilityHint,
629 : }
630 :
631 : impl std::fmt::Display for EvictionLayer {
632 0 : fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
633 0 : match self {
634 0 : Self::Attached(l) => l.fmt(f),
635 0 : Self::Secondary(sl) => {
636 0 : write!(f, "{}/{}", sl.timeline_id, sl.name)
637 : }
638 : }
639 0 : }
640 : }
641 :
642 : #[derive(Default)]
643 : pub(crate) struct DiskUsageEvictionInfo {
644 : /// Timeline's largest layer (remote or resident)
645 : pub max_layer_size: Option<u64>,
646 : /// Timeline's resident layers
647 : pub resident_layers: Vec<EvictionCandidate>,
648 : }
649 :
650 : impl std::fmt::Debug for EvictionCandidate {
651 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
652 0 : // format the tv_sec, tv_nsec into rfc3339 in case someone is looking at it.
653 0 : // having to allocate a string for this is unfortunate, but it will rarely be formatted.
654 0 : let ts = chrono::DateTime::<chrono::Utc>::from(self.last_activity_ts);
655 0 : let ts = ts.to_rfc3339_opts(chrono::SecondsFormat::Nanos, true);
656 : struct DisplayIsDebug<'a, T>(&'a T);
657 : impl<T: std::fmt::Display> std::fmt::Debug for DisplayIsDebug<'_, T> {
658 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
659 0 : write!(f, "{}", self.0)
660 0 : }
661 : }
662 0 : f.debug_struct("LocalLayerInfoForDiskUsageEviction")
663 0 : .field("layer", &DisplayIsDebug(&self.layer))
664 0 : .field("last_activity", &ts)
665 0 : .finish()
666 0 : }
667 : }
668 :
669 : #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
670 : enum EvictionPartition {
671 : // A layer that is unwanted by the tenant: evict all these first, before considering
672 : // any other layers
673 : EvictNow,
674 :
675 : // Above the minimum size threshold: this layer is a candidate for eviction.
676 : Above,
677 :
678 : // Below the minimum size threshold: this layer should only be evicted if all the
679 : // tenants' layers above the minimum size threshold have already been considered.
680 : Below,
681 : }
682 :
683 : enum EvictionCandidates {
684 : Cancelled,
685 : Finished(Vec<(EvictionPartition, EvictionCandidate)>),
686 : }
687 :
688 : /// Gather the eviction candidates.
689 : ///
690 : /// The returned `Ok(EvictionCandidates::Finished(candidates))` is sorted in eviction
691 : /// order. A caller that evicts in that order, until pressure is relieved, implements
692 : /// the eviction policy outlined in the module comment.
693 : ///
694 : /// # Example with a plain absolute-LRU order (formerly `EvictionOrder::AbsoluteAccessed`)
695 : ///
696 : /// Imagine that there are two tenants, A and B, with five layers each, a-e.
697 : /// Each layer has size 100, and both tenant's min_resident_size is 150.
698 : /// The eviction order would be
699 : ///
700 : /// ```text
701 : /// partition last_activity_ts tenant/layer
702 : /// Above 18:30 A/c
703 : /// Above 19:00 A/b
704 : /// Above 18:29 B/c
705 : /// Above 19:05 B/b
706 : /// Above 20:00 B/a
707 : /// Above 20:03 A/a
708 : /// Below 20:30 A/d
709 : /// Below 20:40 B/d
710 : /// Below 20:45 B/e
711 : /// Below 20:58 A/e
712 : /// ```
713 : ///
714 : /// Now, if we need to evict 300 bytes to relieve pressure, we'd evict `A/c, A/b, B/c`.
715 : /// They are all in the `Above` partition, so, we respected each tenant's min_resident_size.
716 : ///
717 : /// But, if we need to evict 900 bytes to relieve pressure, we'd evict
718 : /// `A/c, A/b, B/c, B/b, B/a, A/a, A/d, B/d, B/e`, reaching into the `Below` partition
719 : /// after exhausting the `Above` partition.
720 : /// So, we did not respect each tenant's min_resident_size.
721 : ///
722 : /// # Example with EvictionOrder::RelativeAccessed
723 : ///
724 : /// ```text
725 : /// partition relative_age last_activity_ts tenant/layer
726 : /// Above 0/4 18:30 A/c
727 : /// Above 0/4 18:29 B/c
728 : /// Above 1/4 19:00 A/b
729 : /// Above 1/4 19:05 B/b
730 : /// Above 2/4 20:00 B/a
731 : /// Above 2/4 20:03 A/a
732 : /// Below 3/4 20:30 A/d
733 : /// Below 3/4 20:40 B/d
734 : /// Below 4/4 20:45 B/e
735 : /// Below 4/4 20:58 A/e
736 : /// ```
737 : ///
738 : /// With tenants having the same number of layers the picture does not change much. The same with
739 : /// A having many more layers **resident** (not all of them listed):
740 : ///
741 : /// ```text
742 : /// Above 0/100 18:30 A/c
743 : /// Above 0/4 18:29 B/c
744 : /// Above 1/100 19:00 A/b
745 : /// Above 2/100 20:03 A/a
746 : /// Above 3/100 20:03 A/nth_3
747 : /// Above 4/100 20:03 A/nth_4
748 : /// ...
749 : /// Above 1/4 19:05 B/b
750 : /// Above 25/100 20:04 A/nth_25
751 : /// ...
752 : /// Above 2/4 20:00 B/a
753 : /// Above 50/100 20:10 A/nth_50
754 : /// ...
755 : /// Below 3/4 20:40 B/d
756 : /// Below 99/100 20:30 A/nth_99
757 : /// Below 4/4 20:45 B/e
758 : /// Below 100/100 20:58 A/nth_100
759 : /// ```
760 : ///
761 : /// Now it's easier to see that because A has grown fast it has more layers to get evicted. What is
762 : /// difficult to see is what happens on the next round, assuming that evicting the 23 layers from the above list
763 : /// relieves the pressure (22 A layers gone, 1 B layer gone) but a new fast-growing tenant C has
764 : /// appeared:
765 : ///
766 : /// ```text
767 : /// Above 0/87 20:04 A/nth_23
768 : /// Above 0/3 19:05 B/b
769 : /// Above 0/50 20:59 C/nth_0
770 : /// Above 1/87 20:04 A/nth_24
771 : /// Above 1/50 21:00 C/nth_1
772 : /// Above 2/87 20:04 A/nth_25
773 : /// ...
774 : /// Above 16/50 21:02 C/nth_16
775 : /// Above 1/3 20:00 B/a
776 : /// Above 27/87 20:10 A/nth_50
777 : /// ...
778 : /// Below 2/3 20:40 B/d
779 : /// Below 49/50 21:05 C/nth_49
780 : /// Below 86/87 20:30 A/nth_99
781 : /// Below 3/3 20:45 B/e
782 : /// Below 50/50 21:05 C/nth_50
783 : /// Below 87/87 20:58 A/nth_100
784 : /// ```
785 : ///
786 : /// Now relieving pressure with 23 layers would cost:
787 : /// - tenant A 14 layers
788 : /// - tenant B 1 layer
789 : /// - tenant C 8 layers
790 0 : async fn collect_eviction_candidates(
791 0 : tenant_manager: &Arc<TenantManager>,
792 0 : eviction_order: EvictionOrder,
793 0 : cancel: &CancellationToken,
794 0 : ) -> anyhow::Result<EvictionCandidates> {
795 : const LOG_DURATION_THRESHOLD: std::time::Duration = std::time::Duration::from_secs(10);
796 :
797 : // get a snapshot of the list of tenants
798 0 : let tenants = tenant_manager
799 0 : .list_tenants()
800 0 : .context("get list of tenants")?;
801 :
802 : // TODO: avoid listing every layer in every tenant: this loop can block the executor,
803 : // and the resulting data structure can be huge.
804 : // (https://github.com/neondatabase/neon/issues/6224)
805 0 : let mut candidates = Vec::new();
806 :
807 0 : for (tenant_id, _state, _gen) in tenants {
808 0 : if cancel.is_cancelled() {
809 0 : return Ok(EvictionCandidates::Cancelled);
810 0 : }
811 0 : let tenant = match tenant_manager.get_attached_tenant_shard(tenant_id) {
812 0 : Ok(tenant) if tenant.is_active() => tenant,
813 : Ok(_) => {
814 0 : debug!(tenant_id=%tenant_id.tenant_id, shard_id=%tenant_id.shard_slug(), "Tenant shard is not active");
815 0 : continue;
816 : }
817 0 : Err(e) => {
818 0 : // this can happen if the tenant has a lifecycle transition after we fetched it
819 0 : debug!("failed to get tenant: {e:#}");
820 0 : continue;
821 : }
822 : };
823 :
824 0 : if tenant.cancel.is_cancelled() {
825 0 : info!(%tenant_id, "Skipping tenant for eviction, it is shutting down");
826 0 : continue;
827 0 : }
828 0 :
829 0 : let started_at = std::time::Instant::now();
830 0 :
831 0 : // collect layers from all timelines in this tenant
832 0 : //
833 0 : // If one of the timelines becomes `!is_active()` during the iteration,
834 0 : // for example because we're shutting down, then `max_layer_size` can be too small.
835 0 : // That's OK. This code only runs under a disk pressure situation, and being
836 0 : // a little unfair to tenants during shutdown in such a situation is tolerable.
837 0 : let mut tenant_candidates = Vec::new();
838 0 : let mut max_layer_size = 0;
839 0 : for tl in tenant.list_timelines() {
840 0 : if !tl.is_active() {
841 0 : continue;
842 0 : }
843 0 : let info = tl.get_local_layers_for_disk_usage_eviction().await;
844 0 : debug!(tenant_id=%tl.tenant_shard_id.tenant_id, shard_id=%tl.tenant_shard_id.shard_slug(), timeline_id=%tl.timeline_id, "timeline resident layers count: {}", info.resident_layers.len());
845 :
846 0 : tenant_candidates.extend(info.resident_layers.into_iter());
847 0 : max_layer_size = max_layer_size.max(info.max_layer_size.unwrap_or(0));
848 0 :
849 0 : if cancel.is_cancelled() {
850 0 : return Ok(EvictionCandidates::Cancelled);
851 0 : }
852 : }
853 :
854 : // `min_resident_size` defaults to maximum layer file size of the tenant.
855 : // This ensures that each tenant can have at least one layer resident at a given time,
856 : // ensuring forward progress for a single Timeline::get in that tenant.
857 : // It's a questionable heuristic since, usually, there are many Timeline::get
858 : // requests going on for a tenant, and, at least in Neon prod, the median
859 : // layer file size is much smaller than the compaction target size.
860 : // We could be better here, e.g., sum of all L0 layers + most recent L1 layer.
861 : // That's what's typically used by the various background loops.
862 : //
863 : // The default can be overridden with a fixed value in the tenant conf.
864 : // A default override can be put in the default tenant conf in the pageserver.toml.
865 0 : let min_resident_size = if let Some(s) = tenant.get_min_resident_size_override() {
866 0 : debug!(
867 0 : tenant_id=%tenant.tenant_shard_id().tenant_id,
868 0 : shard_id=%tenant.tenant_shard_id().shard_slug(),
869 0 : overridden_size=s,
870 0 : "using overridden min resident size for tenant"
871 : );
872 0 : s
873 : } else {
874 0 : debug!(
875 0 : tenant_id=%tenant.tenant_shard_id().tenant_id,
876 0 : shard_id=%tenant.tenant_shard_id().shard_slug(),
877 0 : max_layer_size,
878 0 : "using max layer size as min_resident_size for tenant",
879 : );
880 0 : max_layer_size
881 : };
882 :
883 : // Sort layers most-recently-used first, then calculate [`EvictionPartition`] for each layer,
884 : // where the inputs are:
885 : // - whether the layer is visible
886 : // - whether the layer is above/below the min_resident_size cutline
887 0 : tenant_candidates
888 0 : .sort_unstable_by_key(|layer_info| std::cmp::Reverse(layer_info.last_activity_ts));
889 0 : let mut cumsum: i128 = 0;
890 0 :
891 0 : let total = tenant_candidates.len();
892 0 :
893 0 : let tenant_candidates =
894 0 : tenant_candidates
895 0 : .into_iter()
896 0 : .enumerate()
897 0 : .map(|(i, mut candidate)| {
898 0 : // as we iterate this reverse-sorted list, the most recently accessed layer always gets
899 0 : // relative_last_activity == 1.0; this makes us evict it last.
900 0 : candidate.relative_last_activity =
901 0 : eviction_order.relative_last_activity(total, i);
902 :
903 0 : let partition = match candidate.visibility {
904 : LayerVisibilityHint::Covered => {
905 : // Covered layers are evicted first
906 0 : EvictionPartition::EvictNow
907 : }
908 : LayerVisibilityHint::Visible => {
909 0 : cumsum += i128::from(candidate.layer.get_file_size());
910 0 :
911 0 : if cumsum > min_resident_size as i128 {
912 0 : EvictionPartition::Above
913 : } else {
914 : // The most recent layers below the min_resident_size threshold
915 : // are the last to be evicted.
916 0 : EvictionPartition::Below
917 : }
918 : }
919 : };
920 :
921 0 : (partition, candidate)
922 0 : });
923 0 :
924 0 : METRICS
925 0 : .tenant_layer_count
926 0 : .observe(tenant_candidates.len() as f64);
927 0 :
928 0 : candidates.extend(tenant_candidates);
929 0 :
930 0 : let elapsed = started_at.elapsed();
931 0 : METRICS
932 0 : .tenant_collection_time
933 0 : .observe(elapsed.as_secs_f64());
934 0 :
935 0 : if elapsed > LOG_DURATION_THRESHOLD {
936 0 : tracing::info!(
937 0 : tenant_id=%tenant.tenant_shard_id().tenant_id,
938 0 : shard_id=%tenant.tenant_shard_id().shard_slug(),
939 0 : elapsed_ms = elapsed.as_millis(),
940 0 : "collection took longer than threshold"
941 : );
942 0 : }
943 : }
944 :
945 : // Note: the same tenant ID might be hit twice, if it transitions from attached to
946 : // secondary while we run. That is okay: when we eventually try and run the eviction,
947 : // the `Gate` on the object will ensure that whichever one has already been shut down
948 : // will not delete anything.
949 :
950 0 : let mut secondary_tenants = Vec::new();
951 0 : tenant_manager.foreach_secondary_tenants(
952 0 : |_tenant_shard_id: &TenantShardId, state: &Arc<SecondaryTenant>| {
953 0 : secondary_tenants.push(state.clone());
954 0 : },
955 0 : );
956 :
957 0 : for tenant in secondary_tenants {
958 : // for secondary tenants we use the sum of on-disk layers and already-evicted layers. this is
959 : // to prevent repeated disk-usage-based evictions from completely draining secondaries that
960 : // update less often.
961 0 : let (mut layer_info, total_layers) = tenant.get_layers_for_eviction();
962 0 :
963 0 : debug_assert!(
964 0 : total_layers >= layer_info.resident_layers.len(),
965 0 : "total_layers ({total_layers}) must be at least the resident_layers.len() ({})",
966 0 : layer_info.resident_layers.len()
967 : );
968 :
969 0 : let started_at = std::time::Instant::now();
970 0 :
971 0 : layer_info
972 0 : .resident_layers
973 0 : .sort_unstable_by_key(|layer_info| std::cmp::Reverse(layer_info.last_activity_ts));
974 0 :
975 0 : let tenant_candidates =
976 0 : layer_info
977 0 : .resident_layers
978 0 : .into_iter()
979 0 : .enumerate()
980 0 : .map(|(i, mut candidate)| {
981 0 : candidate.relative_last_activity =
982 0 : eviction_order.relative_last_activity(total_layers, i);
983 0 : (
984 0 : // Secondary locations' layers are always considered above the min resident size,
985 0 : // i.e. secondary locations are permitted to be trimmed to zero layers if all
986 0 : // the layers have sufficiently old access times.
987 0 : EvictionPartition::Above,
988 0 : candidate,
989 0 : )
990 0 : });
991 0 :
992 0 : METRICS
993 0 : .tenant_layer_count
994 0 : .observe(tenant_candidates.len() as f64);
995 0 : candidates.extend(tenant_candidates);
996 0 :
997 0 : tokio::task::yield_now().await;
998 :
999 0 : let elapsed = started_at.elapsed();
1000 0 :
1001 0 : METRICS
1002 0 : .tenant_collection_time
1003 0 : .observe(elapsed.as_secs_f64());
1004 0 :
1005 0 : if elapsed > LOG_DURATION_THRESHOLD {
1006 0 : tracing::info!(
1007 0 : tenant_id=%tenant.tenant_shard_id().tenant_id,
1008 0 : shard_id=%tenant.tenant_shard_id().shard_slug(),
1009 0 : elapsed_ms = elapsed.as_millis(),
1010 0 : "collection took longer than threshold"
1011 : );
1012 0 : }
1013 : }
1014 :
1015 0 : debug_assert!(EvictionPartition::Above < EvictionPartition::Below,
1016 0 : "as explained in the function's doc comment, layers that aren't in the tenant's min_resident_size are evicted first");
1017 0 : debug_assert!(EvictionPartition::EvictNow < EvictionPartition::Above,
1018 0 : "as explained in the function's doc comment, layers that aren't in the tenant's min_resident_size are evicted first");
1019 :
1020 0 : eviction_order.sort(&mut candidates);
1021 0 :
1022 0 : Ok(EvictionCandidates::Finished(candidates))
1023 0 : }
1024 :
1025 : /// Given a pre-sorted vec of all layers in the system, select the first N which are enough to
1026 : /// relieve pressure.
1027 : ///
1028 : /// Returns the number of candidates selected, together with the planned usage.
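: ///
: /// A hypothetical walk-through (sizes invented for illustration): if `usage_pre` needs 250 more
: /// available bytes and every candidate is 100 bytes, the first three candidates are selected and
: /// the loop stops as soon as the planned usage no longer reports pressure:
: ///
: /// ```ignore
: /// let (evicted_amount, planned) = select_victims(&candidates, usage_pre).into_amount_and_planned();
: /// assert_eq!(evicted_amount, 3);
: /// ```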
1029 0 : fn select_victims<U: Usage>(
1030 0 : candidates: &[(EvictionPartition, EvictionCandidate)],
1031 0 : usage_pre: U,
1032 0 : ) -> VictimSelection<U> {
1033 0 : let mut usage_when_switched = None;
1034 0 : let mut usage_planned = usage_pre;
1035 0 : let mut evicted_amount = 0;
1036 :
1037 0 : for (i, (partition, candidate)) in candidates.iter().enumerate() {
1038 0 : if !usage_planned.has_pressure() {
1039 0 : break;
1040 0 : }
1041 0 :
1042 0 : if partition == &EvictionPartition::Below && usage_when_switched.is_none() {
1043 0 : usage_when_switched = Some((usage_planned, i));
1044 0 : }
1045 :
1046 0 : usage_planned.add_available_bytes(candidate.layer.get_file_size());
1047 0 : evicted_amount += 1;
1048 : }
1049 :
1050 0 : VictimSelection {
1051 0 : amount: evicted_amount,
1052 0 : usage_pre,
1053 0 : usage_when_switched,
1054 0 : usage_planned,
1055 0 : }
1056 0 : }
1057 :
1058 : struct VictimSelection<U> {
1059 : amount: usize,
1060 : usage_pre: U,
1061 : usage_when_switched: Option<(U, usize)>,
1062 : usage_planned: U,
1063 : }
1064 :
1065 : impl<U: Usage> VictimSelection<U> {
1066 0 : fn into_amount_and_planned(self) -> (usize, PlannedUsage<U>) {
1067 0 : debug!(
1068 : evicted_amount=%self.amount,
1069 0 : "took enough candidates for pressure to be relieved"
1070 : );
1071 :
1072 0 : if let Some((usage_planned, candidate_no)) = self.usage_when_switched.as_ref() {
1073 0 : warn!(usage_pre=?self.usage_pre, ?usage_planned, candidate_no, "tenant_min_resident_size-respecting LRU would not relieve pressure, evicting more following global LRU policy");
1074 0 : }
1075 :
1076 0 : let planned = match self.usage_when_switched {
1077 0 : Some((respecting_tenant_min_resident_size, _)) => PlannedUsage {
1078 0 : respecting_tenant_min_resident_size,
1079 0 : fallback_to_global_lru: Some(self.usage_planned),
1080 0 : },
1081 0 : None => PlannedUsage {
1082 0 : respecting_tenant_min_resident_size: self.usage_planned,
1083 0 : fallback_to_global_lru: None,
1084 0 : },
1085 : };
1086 :
1087 0 : (self.amount, planned)
1088 0 : }
1089 : }
1090 :
1091 : /// A totally ordered f32 subset we can use with sorting functions.
1092 : pub(crate) mod finite_f32 {
1093 :
1094 : /// A totally ordered f32 subset we can use with sorting functions.
1095 : #[derive(Clone, Copy, PartialEq)]
1096 : pub struct FiniteF32(f32);
1097 :
1098 : impl std::fmt::Debug for FiniteF32 {
1099 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
1100 0 : std::fmt::Debug::fmt(&self.0, f)
1101 0 : }
1102 : }
1103 :
1104 : impl std::fmt::Display for FiniteF32 {
1105 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
1106 0 : std::fmt::Display::fmt(&self.0, f)
1107 0 : }
1108 : }
1109 :
1110 : impl std::cmp::Eq for FiniteF32 {}
1111 :
1112 : impl std::cmp::PartialOrd for FiniteF32 {
1113 0 : fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
1114 0 : Some(self.cmp(other))
1115 0 : }
1116 : }
1117 :
1118 : impl std::cmp::Ord for FiniteF32 {
1119 0 : fn cmp(&self, other: &Self) -> std::cmp::Ordering {
1120 0 : self.0.total_cmp(&other.0)
1121 0 : }
1122 : }
1123 :
1124 : impl TryFrom<f32> for FiniteF32 {
1125 : type Error = f32;
1126 :
1127 0 : fn try_from(value: f32) -> Result<Self, Self::Error> {
1128 0 : if value.is_finite() {
1129 0 : Ok(FiniteF32(value))
1130 : } else {
1131 0 : Err(value)
1132 : }
1133 0 : }
1134 : }
1135 :
1136 : impl From<FiniteF32> for f32 {
1137 40 : fn from(value: FiniteF32) -> f32 {
1138 40 : value.0
1139 40 : }
1140 : }
1141 :
1142 : impl FiniteF32 {
1143 : pub const ZERO: FiniteF32 = FiniteF32(0.0);
1144 :
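: /// Accepts only values in `0.0..=1.0`; anything else is returned as the error. For example:
: ///
: /// ```ignore
: /// assert_eq!(FiniteF32::try_from_normalized(0.5).unwrap().into_inner(), 0.5);
: /// assert!(FiniteF32::try_from_normalized(1.5).is_err());
: /// ```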
1145 40 : pub fn try_from_normalized(value: f32) -> Result<Self, f32> {
1146 40 : if (0.0..=1.0).contains(&value) {
1147 : // -0.0 is within the range; normalize it to 0.0 so the result stays within 0.0..=1.0
1148 40 : let value = value.abs();
1149 40 : Ok(FiniteF32(value))
1150 : } else {
1151 0 : Err(value)
1152 : }
1153 40 : }
1154 :
1155 40 : pub fn into_inner(self) -> f32 {
1156 40 : self.into()
1157 40 : }
1158 : }
1159 : }
1160 :
1161 : mod filesystem_level_usage {
1162 : use anyhow::Context;
1163 : use camino::Utf8Path;
1164 :
1165 : use crate::statvfs::Statvfs;
1166 :
1167 : use super::DiskUsageEvictionTaskConfig;
1168 :
1169 : #[derive(Debug, Clone, Copy)]
1170 : pub struct Usage<'a> {
1171 : config: &'a DiskUsageEvictionTaskConfig,
1172 :
1173 : /// Filesystem capacity
1174 : total_bytes: u64,
1175 : /// Free filesystem space
1176 : avail_bytes: u64,
1177 : }
1178 :
1179 : impl super::Usage for Usage<'_> {
1180 14 : fn has_pressure(&self) -> bool {
1181 14 : let usage_pct =
1182 14 : (100.0 * (1.0 - ((self.avail_bytes as f64) / (self.total_bytes as f64)))) as u64;
1183 14 :
1184 14 : let pressures = [
1185 14 : (
1186 14 : "min_avail_bytes",
1187 14 : self.avail_bytes < self.config.min_avail_bytes,
1188 14 : ),
1189 14 : (
1190 14 : "max_usage_pct",
1191 14 : usage_pct >= self.config.max_usage_pct.get() as u64,
1192 14 : ),
1193 14 : ];
1194 14 :
1195 28 : pressures.into_iter().any(|(_, has_pressure)| has_pressure)
1196 14 : }
1197 :
1198 12 : fn add_available_bytes(&mut self, bytes: u64) {
1199 12 : self.avail_bytes += bytes;
1200 12 : }
1201 : }
1202 :
1203 0 : pub fn get<'a>(
1204 0 : tenants_dir: &Utf8Path,
1205 0 : config: &'a DiskUsageEvictionTaskConfig,
1206 0 : ) -> anyhow::Result<Usage<'a>> {
1207 0 : let mock_config = {
1208 0 : #[cfg(feature = "testing")]
1209 0 : {
1210 0 : config.mock_statvfs.as_ref()
1211 : }
1212 : #[cfg(not(feature = "testing"))]
1213 : {
1214 : None
1215 : }
1216 : };
1217 :
1218 0 : let stat = Statvfs::get(tenants_dir, mock_config)
1219 0 : .context("statvfs failed, presumably directory got unlinked")?;
1220 :
1221 0 : let (avail_bytes, total_bytes) = stat.get_avail_total_bytes();
1222 0 :
1223 0 : Ok(Usage {
1224 0 : config,
1225 0 : total_bytes,
1226 0 : avail_bytes,
1227 0 : })
1228 0 : }
1229 :
1230 : #[test]
1231 2 : fn max_usage_pct_pressure() {
1232 : use super::Usage as _;
1233 : use std::time::Duration;
1234 : use utils::serde_percent::Percent;
1235 :
1236 2 : let mut usage = Usage {
1237 2 : config: &DiskUsageEvictionTaskConfig {
1238 2 : max_usage_pct: Percent::new(85).unwrap(),
1239 2 : min_avail_bytes: 0,
1240 2 : period: Duration::MAX,
1241 2 : #[cfg(feature = "testing")]
1242 2 : mock_statvfs: None,
1243 2 : eviction_order: pageserver_api::config::EvictionOrder::default(),
1244 2 : },
1245 2 : total_bytes: 100_000,
1246 2 : avail_bytes: 0,
1247 2 : };
1248 2 :
1249 2 : assert!(usage.has_pressure(), "expected pressure at 100%");
1250 :
1251 2 : usage.add_available_bytes(14_000);
1252 2 : assert!(usage.has_pressure(), "expected pressure at 86%");
1253 :
1254 2 : usage.add_available_bytes(999);
1255 2 : assert!(usage.has_pressure(), "expected pressure at 85.001%");
1256 :
1257 2 : usage.add_available_bytes(1);
1258 2 : assert!(usage.has_pressure(), "expected pressure at precisely 85%");
1259 :
1260 2 : usage.add_available_bytes(1);
1261 2 : assert!(!usage.has_pressure(), "no pressure at 84.999%");
1262 :
1263 2 : usage.add_available_bytes(999);
1264 2 : assert!(!usage.has_pressure(), "no pressure at 84%");
1265 :
1266 2 : usage.add_available_bytes(16_000);
1267 2 : assert!(!usage.has_pressure());
1268 2 : }
1269 : }
1270 :
1271 : #[cfg(test)]
1272 : mod tests {
1273 : use super::*;
1274 :
1275 : #[test]
1276 2 : fn relative_equal_bounds() {
1277 2 : let order = EvictionOrder::RelativeAccessed {
1278 2 : highest_layer_count_loses_first: false,
1279 2 : };
1280 2 :
1281 2 : let len = 10;
1282 2 : let v = (0..len)
1283 20 : .map(|i| order.relative_last_activity(len, i).into_inner())
1284 2 : .collect::<Vec<_>>();
1285 2 :
1286 2 : assert_eq!(v.first(), Some(&1.0));
1287 2 : assert_eq!(v.last(), Some(&0.0));
1288 18 : assert!(v.windows(2).all(|slice| slice[0] > slice[1]));
1289 2 : }
1290 :
1291 : #[test]
1292 2 : fn relative_spare_bounds() {
1293 2 : let order = EvictionOrder::RelativeAccessed {
1294 2 : highest_layer_count_loses_first: true,
1295 2 : };
1296 2 :
1297 2 : let len = 10;
1298 2 : let v = (0..len)
1299 20 : .map(|i| order.relative_last_activity(len, i).into_inner())
1300 2 : .collect::<Vec<_>>();
1301 2 :
1302 2 : assert_eq!(v.first(), Some(&1.0));
1303 2 : assert_eq!(v.last(), Some(&0.1));
1304 18 : assert!(v.windows(2).all(|slice| slice[0] > slice[1]));
1305 2 : }
1306 : }