//! This module implements the pageserver-global disk-usage-based layer eviction task.
//!
//! # Mechanics
//!
//! Function `launch_disk_usage_global_eviction_task` starts a pageserver-global background
//! loop that evicts layers in response to a shortage of available bytes
//! in the $repo/tenants directory's filesystem.
//!
//! The loop runs periodically at a configurable `period`.
//!
//! Each loop iteration uses `statvfs` to determine filesystem-level space usage.
//! It compares the returned usage data against two different types of thresholds.
//! The iteration tries to evict layers until app-internal accounting says we should be below the thresholds.
//! We cross-check this internal accounting with the real world by taking another `statvfs` reading at the end of the iteration.
//! We're good if that second `statvfs` shows that we're _actually_ below the configured thresholds.
//! If we're still above one or more thresholds, we emit a warning log message, leaving it to the operator to investigate further.
//!
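//! For illustration, with hypothetical numbers (a 100 GiB filesystem, `max_usage_pct`
//! of 85, and `min_avail_bytes` of 10 GiB):
//!
//! ```text
//! statvfs:   total = 100 GiB, avail = 12 GiB  =>  usage 88% > 85%  =>  pressure
//! eviction:  evict layers until accounting says avail >= 15 GiB (usage <= 85%)
//! re-check:  second statvfs still above a threshold?  =>  warn, operator investigates
//! ```
//!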
//! # Eviction Policy
//!
//! There are two thresholds:
//! `max_usage_pct` is the maximum allowed usage, expressed in percent of the total filesystem space.
//! If the actual usage is higher, the threshold is exceeded.
//! `min_avail_bytes` is the minimum absolute available space in bytes.
//! If the actual available space is lower, the threshold is exceeded.
//! If either of these thresholds is exceeded, the system is considered to be under "disk pressure", and
//! eviction is performed on the next iteration, to release disk space and bring the usage below the thresholds again.
//! The iteration evicts layers in LRU fashion, but with a weak reservation per tenant.
//! The reservation is to keep the most recently accessed X bytes per tenant resident.
//! If we cannot relieve pressure by evicting layers outside of the reservation, we
//! start evicting layers that are part of the reservation, LRU first.
//!
//! The value for the per-tenant reservation is referred to as `tenant_min_resident_size`
//! throughout the code, but no actual variable carries that name.
//! The per-tenant default value is the maximum of the tenant's layer file sizes, regardless
//! of whether the layers are local or remote.
//! The idea is to keep at least one layer resident per tenant, to ensure it can make forward progress
//! during page reconstruction.
//! An alternative default for all tenants can be specified in the `tenant_config` section of the config.
//! Lastly, each tenant can have an override in their respective tenant config (`min_resident_size_override`).

// Implementation notes:
// - The `#[allow(dead_code)]` attributes above various structs suppress warnings about
//   only the Debug impl reading these fields. We use the Debug impl for semi-structured
//   logging, though.

use std::{
    sync::Arc,
    time::{Duration, SystemTime},
};

use anyhow::Context;
use pageserver_api::shard::TenantShardId;
use remote_storage::GenericRemoteStorage;
use serde::{Deserialize, Serialize};
use tokio::time::Instant;
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, info, instrument, warn, Instrument};
use utils::serde_percent::Percent;
use utils::{completion, id::TimelineId};

use crate::{
    config::PageServerConf,
    task_mgr::{self, TaskKind, BACKGROUND_RUNTIME},
    tenant::{
        self,
        mgr::TenantManager,
        remote_timeline_client::LayerFileMetadata,
        secondary::SecondaryTenant,
        storage_layer::{AsLayerDesc, EvictionError, Layer, LayerFileName},
        Timeline,
    },
};

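/// Configuration for this task, read from the `disk_usage_based_eviction` section of the
/// pageserver config. A hypothetical sketch of that section (values are made up; field
/// names match the struct below):
///
/// ```text
/// [disk_usage_based_eviction]
/// max_usage_pct = 80
/// min_avail_bytes = 2000000000
/// period = "60s"
/// ```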
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct DiskUsageEvictionTaskConfig {
    pub max_usage_pct: Percent,
    pub min_avail_bytes: u64,
    #[serde(with = "humantime_serde")]
    pub period: Duration,
    #[cfg(feature = "testing")]
    pub mock_statvfs: Option<crate::statvfs::mock::Behavior>,
    /// Select sorting for evicted layers
    #[serde(default)]
    pub eviction_order: EvictionOrder,
}

/// Selects the sort order for eviction candidates *after* per-tenant `min_resident_size`
/// partitioning.
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(tag = "type", content = "args")]
pub enum EvictionOrder {
    /// Order the layers to be evicted by how recently they have been accessed in absolute
    /// time.
    ///
    /// This strategy is unfair towards slower-growing tenants when other tenants grow
    /// faster than they do.
    #[default]
    AbsoluteAccessed,

    /// Order the layers to be evicted by how recently they have been accessed, relative to
    /// the other resident layers of the same tenant.
    RelativeAccessed {
        /// Determines if the tenant with the most layers should lose first.
        ///
        /// Having this enabled is currently the only reasonable option, because the order in which
        /// we read tenants is deterministic. If we find the need to set this to `false`, we need
        /// to ensure nondeterminism by adding a random number to break the
        /// `relative_last_activity==0.0` ties.
        #[serde(default = "default_highest_layer_count_loses_first")]
        highest_layer_count_loses_first: bool,
    },
}
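
// With the adjacently-tagged serde representation above (`tag = "type"`, `content = "args"`),
// a non-default order would be spelled in the config roughly like this (hypothetical sketch):
//
//   eviction_order = { type = "RelativeAccessed", args = { highest_layer_count_loses_first = true } }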

fn default_highest_layer_count_loses_first() -> bool {
    true
}

impl EvictionOrder {
    fn sort(&self, candidates: &mut [(MinResidentSizePartition, EvictionCandidate)]) {
        use EvictionOrder::*;

        match self {
            AbsoluteAccessed => {
                candidates.sort_unstable_by_key(|(partition, candidate)| {
                    (*partition, candidate.last_activity_ts)
                });
            }
            RelativeAccessed { .. } => candidates.sort_unstable_by_key(|(partition, candidate)| {
                (*partition, candidate.relative_last_activity)
            }),
        }
    }

    /// Called to fill in the [`EvictionCandidate::relative_last_activity`] while iterating a
    /// tenant's layers in **most** recently used order.
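    ///
    /// A worked numeric example (hypothetical; compare the unit tests at the bottom of this
    /// file): with `total == 10` layers iterated most recently used first,
    /// `highest_layer_count_loses_first == true` yields `10/10, 9/10, ..., 1/10`, while
    /// `false` yields `9/9, 8/9, ..., 0/9`.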
    fn relative_last_activity(&self, total: usize, index: usize) -> finite_f32::FiniteF32 {
        use EvictionOrder::*;

        match self {
            AbsoluteAccessed => finite_f32::FiniteF32::ZERO,
            RelativeAccessed {
                highest_layer_count_loses_first,
            } => {
                // keeping the -1 or not decides if every tenant should lose their least recently accessed
                // layer OR if this should happen in the order of having the highest layer count:
                let fudge = if *highest_layer_count_loses_first {
                    // relative_last_activity vs. tenant layer count:
                    // - 0.1..=1.0 (10 layers)
                    // - 0.01..=1.0 (100 layers)
                    // - 0.001..=1.0 (1000 layers)
                    //
                    // leading to evicting less of the smallest tenants.
                    0
                } else {
                    // use the full 0.0..=1.0 range, which means even the smallest tenants could always lose a
                    // layer. the actual ordering is unspecified: for 10k tenants on a pageserver it could
                    // be that less than 10k layer evictions is enough, so we would not need to evict from
                    // all tenants.
                    //
                    // as the tenant ordering is now deterministic, this could hit the same tenants
                    // disproportionately on multiple invocations. an alternative could be to remember how
                    // many layers we evicted last time from this tenant, and inject that as an additional
                    // fudge here.
                    1
                };

                let total = total.checked_sub(fudge).filter(|&x| x > 1).unwrap_or(1);
                let divider = total as f32;

                // most recently used is always (total - 0) / divider == 1.0
                // least recently used depends on the fudge:
                // - (total - 1) - (total - 1) / total => 0 / total
                // - total - (total - 1) / total => 1 / total
                let distance = (total - index) as f32;

                finite_f32::FiniteF32::try_from_normalized(distance / divider)
                    .unwrap_or_else(|val| {
                        tracing::warn!(%fudge, "calculated invalid relative_last_activity for i={index}, total={total}: {val}");
                        finite_f32::FiniteF32::ZERO
                    })
            }
        }
    }
}

#[derive(Default)]
pub struct State {
    /// Prevents HTTP requests and the background task from running at the same time.
    mutex: tokio::sync::Mutex<()>,
}

pub fn launch_disk_usage_global_eviction_task(
    conf: &'static PageServerConf,
    storage: GenericRemoteStorage,
    state: Arc<State>,
    tenant_manager: Arc<TenantManager>,
    background_jobs_barrier: completion::Barrier,
) -> anyhow::Result<()> {
    let Some(task_config) = &conf.disk_usage_based_eviction else {
        info!("disk usage based eviction task not configured");
        return Ok(());
    };

    info!("launching disk usage based eviction task");

    task_mgr::spawn(
        BACKGROUND_RUNTIME.handle(),
        TaskKind::DiskUsageEviction,
        None,
        None,
        "disk usage based eviction",
        false,
        async move {
            let cancel = task_mgr::shutdown_token();

            // wait until initial load is complete, because we cannot evict from loading tenants.
            tokio::select! {
                _ = cancel.cancelled() => { return Ok(()); },
                _ = background_jobs_barrier.wait() => { }
            };

            disk_usage_eviction_task(&state, task_config, &storage, tenant_manager, cancel).await;
            Ok(())
        },
    );

    Ok(())
}

#[instrument(skip_all)]
async fn disk_usage_eviction_task(
    state: &State,
    task_config: &DiskUsageEvictionTaskConfig,
    storage: &GenericRemoteStorage,
    tenant_manager: Arc<TenantManager>,
    cancel: CancellationToken,
) {
    scopeguard::defer! {
        info!("disk usage based eviction task finishing");
    };

    use crate::tenant::tasks::random_init_delay;
    {
        if random_init_delay(task_config.period, &cancel)
            .await
            .is_err()
        {
            return;
        }
    }

    let mut iteration_no = 0;
    loop {
        iteration_no += 1;
        let start = Instant::now();

        async {
            let res = disk_usage_eviction_task_iteration(
                state,
                task_config,
                storage,
                &tenant_manager,
                &cancel,
            )
            .await;

            match res {
                Ok(()) => {}
                Err(e) => {
                    // these stat failures are expected to be very rare
                    warn!("iteration failed, unexpected error: {e:#}");
                }
            }
        }
        .instrument(tracing::info_span!("iteration", iteration_no))
        .await;

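        // Sleep until one `period` has elapsed since `start`; `timeout_at` returns Ok only
        // if the cancellation token fired before the deadline, in which case we exit.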
        let sleep_until = start + task_config.period;
        if tokio::time::timeout_at(sleep_until, cancel.cancelled())
            .await
            .is_ok()
        {
            break;
        }
    }
}

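/// The usage-accounting interface driven by the iteration logic: `has_pressure` reports
/// whether any threshold is still exceeded, and `add_available_bytes` credits the size of
/// an evicted layer back to the accounting.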
pub trait Usage: Clone + Copy + std::fmt::Debug {
    fn has_pressure(&self) -> bool;
    fn add_available_bytes(&mut self, bytes: u64);
}
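
// For illustration, a minimal sketch of a custom `Usage` implementation (hypothetical and
// unused by production code): pressure exists while fewer than `want_avail` bytes are
// available.
#[cfg(test)]
#[allow(dead_code)]
mod usage_sketch {
    #[derive(Debug, Clone, Copy)]
    struct TestUsage {
        avail: u64,
        want_avail: u64,
    }

    impl super::Usage for TestUsage {
        fn has_pressure(&self) -> bool {
            self.avail < self.want_avail
        }

        fn add_available_bytes(&mut self, bytes: u64) {
            self.avail += bytes;
        }
    }
}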

async fn disk_usage_eviction_task_iteration(
    state: &State,
    task_config: &DiskUsageEvictionTaskConfig,
    storage: &GenericRemoteStorage,
    tenant_manager: &Arc<TenantManager>,
    cancel: &CancellationToken,
) -> anyhow::Result<()> {
    let tenants_dir = tenant_manager.get_conf().tenants_path();
    let usage_pre = filesystem_level_usage::get(&tenants_dir, task_config)
        .context("get filesystem-level disk usage before evictions")?;
    let res = disk_usage_eviction_task_iteration_impl(
        state,
        storage,
        usage_pre,
        tenant_manager,
        task_config.eviction_order,
        cancel,
    )
    .await;
    match res {
        Ok(outcome) => {
            debug!(?outcome, "disk_usage_eviction_iteration finished");
            match outcome {
                IterationOutcome::NoPressure | IterationOutcome::Cancelled => {
                    // nothing to do, the sleep in the caller's loop will handle things
                }
                IterationOutcome::Finished(outcome) => {
                    // Verify with statvfs whether we made any real progress
                    let after = filesystem_level_usage::get(&tenants_dir, task_config)
                        // It's quite unlikely to hit the error here. Keep the code simple and bail out.
                        .context("get filesystem-level disk usage after evictions")?;

                    debug!(?after, "disk usage");

                    if after.has_pressure() {
                        // Don't bother doing an out-of-order iteration here now.
                        // In practice, the task period is set to a value in the tens-of-seconds range,
                        // which will cause another iteration to happen soon enough.
                        // TODO: deltas between the three different usages would be helpful;
                        // consider MiB, GiB, TiB
                        warn!(?outcome, ?after, "disk usage still high");
                    } else {
                        info!(?outcome, ?after, "disk usage pressure relieved");
                    }
                }
            }
        }
        Err(e) => {
            error!("disk_usage_eviction_iteration failed: {:#}", e);
        }
    }

    Ok(())
}

#[derive(Debug, Serialize)]
#[allow(clippy::large_enum_variant)]
pub enum IterationOutcome<U> {
    NoPressure,
    Cancelled,
    Finished(IterationOutcomeFinished<U>),
}

#[allow(dead_code)]
#[derive(Debug, Serialize)]
pub struct IterationOutcomeFinished<U> {
    /// The actual usage observed before we started the iteration.
    before: U,
    /// The expected value for `after`, according to internal accounting, after phase 1.
    planned: PlannedUsage<U>,
    /// The outcome of phase 2, where we actually do the evictions.
    ///
    /// If all layers that phase 1 planned to evict _can_ actually get evicted, this will
    /// be the same as `planned`.
    assumed: AssumedUsage<U>,
}

#[derive(Debug, Serialize)]
#[allow(dead_code)]
struct AssumedUsage<U> {
    /// The expected value for `after`, after phase 2.
    projected_after: U,
    /// The layers we failed to evict during phase 2.
    failed: LayerCount,
}

#[allow(dead_code)]
#[derive(Debug, Serialize)]
struct PlannedUsage<U> {
    respecting_tenant_min_resident_size: U,
    fallback_to_global_lru: Option<U>,
}

#[allow(dead_code)]
#[derive(Debug, Default, Serialize)]
struct LayerCount {
    file_sizes: u64,
    count: usize,
}

pub(crate) async fn disk_usage_eviction_task_iteration_impl<U: Usage>(
    state: &State,
    _storage: &GenericRemoteStorage,
    usage_pre: U,
    tenant_manager: &Arc<TenantManager>,
    eviction_order: EvictionOrder,
    cancel: &CancellationToken,
) -> anyhow::Result<IterationOutcome<U>> {
    // use tokio's mutex to get a Sync guard (instead of std::sync::Mutex)
    let _g = state
        .mutex
        .try_lock()
        .map_err(|_| anyhow::anyhow!("iteration is already executing"))?;

    debug!(?usage_pre, "disk usage");

    if !usage_pre.has_pressure() {
        return Ok(IterationOutcome::NoPressure);
    }

    warn!(
        ?usage_pre,
        "running disk usage based eviction due to pressure"
    );

    let candidates =
        match collect_eviction_candidates(tenant_manager, eviction_order, cancel).await? {
            EvictionCandidates::Cancelled => {
                return Ok(IterationOutcome::Cancelled);
            }
            EvictionCandidates::Finished(partitioned) => partitioned,
        };

    // Debug-log the list of candidates
    let now = SystemTime::now();
    for (i, (partition, candidate)) in candidates.iter().enumerate() {
        let nth = i + 1;
        let total_candidates = candidates.len();
        let size = candidate.layer.get_file_size();
        let rel = candidate.relative_last_activity;
        debug!(
            "cand {nth}/{total_candidates}: size={size}, rel_last_activity={rel}, no_access_for={}us, partition={partition:?}, {}/{}/{}",
            now.duration_since(candidate.last_activity_ts)
                .unwrap()
                .as_micros(),
            candidate.layer.get_tenant_shard_id(),
            candidate.layer.get_timeline_id(),
            candidate.layer.get_name(),
        );
    }

    // phase1: select victims to relieve pressure
    //
    // Walk through the list of candidates, until we have accumulated enough layers to get
    // us back under the pressure threshold. 'usage_planned' is updated so that it tracks
    // how much disk space would be used after evicting all the layers up to the current
    // point in the list.
    //
    // If we get far enough in the list that we start to evict layers that are below
    // the tenant's min-resident-size threshold, print a warning, and memorize the disk
    // usage at that point, in `PlannedUsage::respecting_tenant_min_resident_size`.

    let selection = select_victims(&candidates, usage_pre);

    let (evicted_amount, usage_planned) = selection.into_amount_and_planned();

    // phase2: evict layers
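    //
    // The loop below keeps up to `limit` eviction futures in flight in the JoinSet,
    // opportunistically draining completed ones while spawning new ones, and switches to
    // awaiting completions once all selected candidates have been spawned.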

    let mut js = tokio::task::JoinSet::new();
    let limit = 1000;

    let mut evicted = candidates.into_iter().take(evicted_amount).fuse();
    let mut consumed_all = false;

    // After the evictions, `usage_assumed` is the post-eviction usage,
    // according to internal accounting.
    let mut usage_assumed = usage_pre;
    let mut evictions_failed = LayerCount::default();

    let evict_layers = async move {
        loop {
            let next = if js.len() >= limit || consumed_all {
                js.join_next().await
            } else if !js.is_empty() {
                // opportunistically consume a ready result, one for each newly spawned eviction
                futures::future::FutureExt::now_or_never(js.join_next()).and_then(|x| x)
            } else {
                None
            };

            if let Some(next) = next {
                match next {
                    Ok(Ok(file_size)) => {
                        usage_assumed.add_available_bytes(file_size);
                    }
                    Ok(Err((file_size, EvictionError::NotFound | EvictionError::Downloaded))) => {
                        evictions_failed.file_sizes += file_size;
                        evictions_failed.count += 1;
                    }
                    Err(je) if je.is_cancelled() => unreachable!("not used"),
                    Err(je) if je.is_panic() => { /* already logged */ }
                    Err(je) => tracing::error!("unknown JoinError: {je:?}"),
                }
            }

            if consumed_all && js.is_empty() {
                break;
            }

            // calling `next()` again when `consumed_all` is set is fine, as `evicted` is fused.
            let Some((_partition, candidate)) = evicted.next() else {
                consumed_all = true;
                continue;
            };

            match candidate.layer {
                EvictionLayer::Attached(layer) => {
                    let file_size = layer.layer_desc().file_size;
                    js.spawn(async move {
                        layer
                            .evict_and_wait()
                            .await
                            .map(|()| file_size)
                            .map_err(|e| (file_size, e))
                    });
                }
                EvictionLayer::Secondary(layer) => {
                    let file_size = layer.metadata.file_size();
                    let tenant_manager = tenant_manager.clone();

                    js.spawn(async move {
                        layer
                            .secondary_tenant
                            .evict_layer(tenant_manager.get_conf(), layer.timeline_id, layer.name)
                            .await;
                        Ok(file_size)
                    });
                }
            }
            tokio::task::yield_now().await;
        }

        (usage_assumed, evictions_failed)
    };

    let (usage_assumed, evictions_failed) = tokio::select! {
        tuple = evict_layers => { tuple },
        _ = cancel.cancelled() => {
            // dropping joinset will abort all pending evict_and_waits and that is fine, our
            // requests will still stand
            return Ok(IterationOutcome::Cancelled);
        }
    };

    Ok(IterationOutcome::Finished(IterationOutcomeFinished {
        before: usage_pre,
        planned: usage_planned,
        assumed: AssumedUsage {
            projected_after: usage_assumed,
            failed: evictions_failed,
        },
    }))
}

#[derive(Clone)]
pub(crate) struct EvictionSecondaryLayer {
    pub(crate) secondary_tenant: Arc<SecondaryTenant>,
    pub(crate) timeline_id: TimelineId,
    pub(crate) name: LayerFileName,
    pub(crate) metadata: LayerFileMetadata,
}

/// Full [`Layer`] objects are specific to tenants in attached mode. This type is a layer
/// of indirection to store either a `Layer`, or a reference to a secondary tenant and a layer name.
#[derive(Clone)]
pub(crate) enum EvictionLayer {
    Attached(Layer),
    #[allow(dead_code)]
    Secondary(EvictionSecondaryLayer),
}

impl From<Layer> for EvictionLayer {
    fn from(value: Layer) -> Self {
        Self::Attached(value)
    }
}

impl EvictionLayer {
    pub(crate) fn get_tenant_shard_id(&self) -> &TenantShardId {
        match self {
            Self::Attached(l) => &l.layer_desc().tenant_shard_id,
            Self::Secondary(sl) => sl.secondary_tenant.get_tenant_shard_id(),
        }
    }

    pub(crate) fn get_timeline_id(&self) -> &TimelineId {
        match self {
            Self::Attached(l) => &l.layer_desc().timeline_id,
            Self::Secondary(sl) => &sl.timeline_id,
        }
    }

    pub(crate) fn get_name(&self) -> LayerFileName {
        match self {
            Self::Attached(l) => l.layer_desc().filename(),
            Self::Secondary(sl) => sl.name.clone(),
        }
    }

    pub(crate) fn get_file_size(&self) -> u64 {
        match self {
            Self::Attached(l) => l.layer_desc().file_size,
            Self::Secondary(sl) => sl.metadata.file_size(),
        }
    }
}

#[derive(Clone)]
pub(crate) struct EvictionCandidate {
    pub(crate) layer: EvictionLayer,
    pub(crate) last_activity_ts: SystemTime,
    pub(crate) relative_last_activity: finite_f32::FiniteF32,
}

impl std::fmt::Display for EvictionLayer {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match self {
            Self::Attached(l) => l.fmt(f),
            Self::Secondary(sl) => {
                write!(f, "{}/{}", sl.timeline_id, sl.name)
            }
        }
    }
}

pub(crate) struct DiskUsageEvictionInfo {
    /// Timeline's largest layer (remote or resident)
    pub max_layer_size: Option<u64>,
    /// Timeline's resident layers
    pub resident_layers: Vec<EvictionCandidate>,
}

impl std::fmt::Debug for EvictionCandidate {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // format the tv_sec, tv_nsec into rfc3339 in case someone is looking at it
        // having to allocate a string for this is bad, but it will rarely be formatted
        let ts = chrono::DateTime::<chrono::Utc>::from(self.last_activity_ts);
        let ts = ts.to_rfc3339_opts(chrono::SecondsFormat::Nanos, true);
        struct DisplayIsDebug<'a, T>(&'a T);
        impl<'a, T: std::fmt::Display> std::fmt::Debug for DisplayIsDebug<'a, T> {
            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                write!(f, "{}", self.0)
            }
        }
        f.debug_struct("LocalLayerInfoForDiskUsageEviction")
            .field("layer", &DisplayIsDebug(&self.layer))
            .field("last_activity", &ts)
            .finish()
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum MinResidentSizePartition {
    Above,
    Below,
}

enum EvictionCandidates {
    Cancelled,
    Finished(Vec<(MinResidentSizePartition, EvictionCandidate)>),
}

/// Gather the eviction candidates.
///
/// The returned `Ok(EvictionCandidates::Finished(candidates))` is sorted in eviction
/// order. A caller that evicts in that order, until pressure is relieved, implements
/// the eviction policy outlined in the module comment.
///
/// # Example with EvictionOrder::AbsoluteAccessed
///
/// Imagine that there are two tenants, A and B, with five layers each, a-e.
/// Each layer has size 100, and both tenants' min_resident_size is 150.
/// The eviction order would be
///
/// ```text
/// partition last_activity_ts tenant/layer
/// Above     18:29            B/c
/// Above     18:30            A/c
/// Above     19:00            A/b
/// Above     19:05            B/b
/// Above     20:00            B/a
/// Above     20:03            A/a
/// Below     20:30            A/d
/// Below     20:40            B/d
/// Below     20:45            B/e
/// Below     20:58            A/e
/// ```
///
/// Now, if we need to evict 300 bytes to relieve pressure, we'd evict `B/c, A/c, A/b`.
/// They are all in the `Above` partition, so we respected each tenant's min_resident_size.
///
/// But, if we need to evict 900 bytes to relieve pressure, we'd evict
/// `B/c, A/c, A/b, B/b, B/a, A/a, A/d, B/d, B/e`, reaching into the `Below` partition
/// after exhausting the `Above` partition.
/// So, we did not respect each tenant's min_resident_size.
///
/// # Example with EvictionOrder::RelativeAccessed
///
/// ```text
/// partition relative_age last_activity_ts tenant/layer
/// Above     0/4          18:30            A/c
/// Above     0/4          18:29            B/c
/// Above     1/4          19:00            A/b
/// Above     1/4          19:05            B/b
/// Above     2/4          20:00            B/a
/// Above     2/4          20:03            A/a
/// Below     3/4          20:30            A/d
/// Below     3/4          20:40            B/d
/// Below     4/4          20:45            B/e
/// Below     4/4          20:58            A/e
/// ```
///
/// With tenants having the same number of layers the picture does not change much. The same
/// applies when A has many more layers **resident** (not all of them listed):
///
/// ```text
/// Above     0/100        18:30            A/c
/// Above     0/4          18:29            B/c
/// Above     1/100        19:00            A/b
/// Above     2/100        20:03            A/a
/// Above     3/100        20:03            A/nth_3
/// Above     4/100        20:03            A/nth_4
/// ...
/// Above     1/4          19:05            B/b
/// Above     25/100       20:04            A/nth_25
/// ...
/// Above     2/4          20:00            B/a
/// Above     50/100       20:10            A/nth_50
/// ...
/// Below     3/4          20:40            B/d
/// Below     99/100       20:30            A/nth_99
/// Below     4/4          20:45            B/e
/// Below     100/100      20:58            A/nth_100
/// ```
///
/// Now it's easier to see that, because A has grown fast, it has more layers to get evicted.
/// What is more difficult to see is what happens on the next round, assuming that evicting 23
/// layers from the above list relieves the pressure (22 A layers and 1 B layer gone), but a
/// new fast-growing tenant C has appeared:
///
/// ```text
/// Above     0/87         20:04            A/nth_23
/// Above     0/3          19:05            B/b
/// Above     0/50         20:59            C/nth_0
/// Above     1/87         20:04            A/nth_24
/// Above     1/50         21:00            C/nth_1
/// Above     2/87         20:04            A/nth_25
/// ...
/// Above     16/50        21:02            C/nth_16
/// Above     1/3          20:00            B/a
/// Above     27/87        20:10            A/nth_50
/// ...
/// Below     2/3          20:40            B/d
/// Below     49/50        21:05            C/nth_49
/// Below     86/87        20:30            A/nth_99
/// Below     3/3          20:45            B/e
/// Below     50/50        21:05            C/nth_50
/// Below     87/87        20:58            A/nth_100
/// ```
///
/// Now relieving pressure with 23 layers would cost:
/// - tenant A 14 layers
/// - tenant B 1 layer
/// - tenant C 8 layers
async fn collect_eviction_candidates(
    tenant_manager: &Arc<TenantManager>,
    eviction_order: EvictionOrder,
    cancel: &CancellationToken,
) -> anyhow::Result<EvictionCandidates> {
    // get a snapshot of the list of tenants
    let tenants = tenant::mgr::list_tenants()
        .await
        .context("get list of tenants")?;

    // TODO: avoid listing every layer in every tenant: this loop can block the executor,
    // and the resulting data structure can be huge.
    // (https://github.com/neondatabase/neon/issues/6224)
    let mut candidates = Vec::new();

    for (tenant_id, _state, _gen) in tenants {
        if cancel.is_cancelled() {
            return Ok(EvictionCandidates::Cancelled);
        }
        let tenant = match tenant::mgr::get_tenant(tenant_id, true) {
            Ok(tenant) => tenant,
            Err(e) => {
                // this can happen if the tenant has a lifecycle transition after we fetched it
                debug!("failed to get tenant: {e:#}");
                continue;
            }
        };

        if tenant.cancel.is_cancelled() {
            info!(%tenant_id, "Skipping tenant for eviction, it is shutting down");
            continue;
        }

        // collect layers from all timelines in this tenant
        //
        // If one of the timelines becomes `!is_active()` during the iteration,
        // for example because we're shutting down, then `max_layer_size` can be too small.
        // That's OK. This code only runs under a disk pressure situation, and being
        // a little unfair to tenants during shutdown in such a situation is tolerable.
        let mut tenant_candidates = Vec::new();
        let mut max_layer_size = 0;
        for tl in tenant.list_timelines() {
            if !tl.is_active() {
                continue;
            }
            let info = tl.get_local_layers_for_disk_usage_eviction().await;
            debug!(tenant_id=%tl.tenant_shard_id.tenant_id, shard_id=%tl.tenant_shard_id.shard_slug(), timeline_id=%tl.timeline_id, "timeline resident layers count: {}", info.resident_layers.len());
            tenant_candidates.extend(info.resident_layers.into_iter());
            max_layer_size = max_layer_size.max(info.max_layer_size.unwrap_or(0));

            if cancel.is_cancelled() {
                return Ok(EvictionCandidates::Cancelled);
            }
        }

        // `min_resident_size` defaults to the maximum layer file size of the tenant.
        // This ensures that each tenant can have at least one layer resident at a given time,
        // ensuring forward progress for a single Timeline::get in that tenant.
        // It's a questionable heuristic since, usually, there are many Timeline::get
        // requests going on for a tenant, and, at least in Neon prod, the median
        // layer file size is much smaller than the compaction target size.
        // We could be better here, e.g., sum of all L0 layers + most recent L1 layer.
        // That's what's typically used by the various background loops.
        //
        // The default can be overridden with a fixed value in the tenant conf.
        // A default override can be put in the default tenant conf in the pageserver.toml.
        let min_resident_size = if let Some(s) = tenant.get_min_resident_size_override() {
            debug!(
                tenant_id=%tenant.tenant_shard_id().tenant_id,
                shard_id=%tenant.tenant_shard_id().shard_slug(),
                overridden_size=s,
                "using overridden min resident size for tenant"
            );
            s
        } else {
            debug!(
                tenant_id=%tenant.tenant_shard_id().tenant_id,
                shard_id=%tenant.tenant_shard_id().shard_slug(),
                max_layer_size,
                "using max layer size as min_resident_size for tenant",
            );
            max_layer_size
        };

        // Sort layers most-recently-used first, then partition by
        // cumsum above/below min_resident_size.
        tenant_candidates
            .sort_unstable_by_key(|layer_info| std::cmp::Reverse(layer_info.last_activity_ts));
        let mut cumsum: i128 = 0;

        let total = tenant_candidates.len();

        for (i, mut candidate) in tenant_candidates.into_iter().enumerate() {
            // as we iterate this reverse-sorted list, the most recently accessed layer will always
            // be 1.0; this is for us to evict it last.
            candidate.relative_last_activity = eviction_order.relative_last_activity(total, i);

            let partition = if cumsum > min_resident_size as i128 {
                MinResidentSizePartition::Above
            } else {
                MinResidentSizePartition::Below
            };
            cumsum += i128::from(candidate.layer.get_file_size());
            candidates.push((partition, candidate));
        }
    }

    // Note: the same tenant ID might be hit twice, if it transitions from attached to
    // secondary while we run. That is okay: when we eventually try and run the eviction,
    // the `Gate` on the object will ensure that whichever one has already been shut down
    // will not delete anything.

    let mut secondary_tenants = Vec::new();
    tenant_manager.foreach_secondary_tenants(
        |_tenant_shard_id: &TenantShardId, state: &Arc<SecondaryTenant>| {
            secondary_tenants.push(state.clone());
        },
    );

    for secondary_tenant in secondary_tenants {
        let mut layer_info = secondary_tenant.get_layers_for_eviction();

        layer_info
            .resident_layers
            .sort_unstable_by_key(|layer_info| std::cmp::Reverse(layer_info.last_activity_ts));

        candidates.extend(layer_info.resident_layers.into_iter().map(|candidate| {
            (
                // Secondary locations' layers are always considered above the min resident size,
                // i.e. secondary locations are permitted to be trimmed to zero layers if all
                // the layers have sufficiently old access times.
                MinResidentSizePartition::Above,
                candidate,
            )
        }));
    }

    debug_assert!(MinResidentSizePartition::Above < MinResidentSizePartition::Below,
        "as explained in the function's doc comment, layers that aren't in the tenant's min_resident_size are evicted first");

    eviction_order.sort(&mut candidates);

    Ok(EvictionCandidates::Finished(candidates))
}

/// Given a pre-sorted vec of all layers in the system, select the first N which are enough to
/// relieve pressure.
///
/// Returns the number of candidates selected, together with the planned usage.
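///
/// For example (hypothetical numbers, matching the module-level example): if the candidates
/// are 100-byte layers and 300 more bytes must become available to relieve pressure, the
/// first three candidates are selected; `usage_when_switched` records the point, if any,
/// where selection crossed from the `Above` into the `Below` partition.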
fn select_victims<U: Usage>(
    candidates: &[(MinResidentSizePartition, EvictionCandidate)],
    usage_pre: U,
) -> VictimSelection<U> {
    let mut usage_when_switched = None;
    let mut usage_planned = usage_pre;
    let mut evicted_amount = 0;

    for (i, (partition, candidate)) in candidates.iter().enumerate() {
        if !usage_planned.has_pressure() {
            break;
        }

        if partition == &MinResidentSizePartition::Below && usage_when_switched.is_none() {
            usage_when_switched = Some((usage_planned, i));
        }

        usage_planned.add_available_bytes(candidate.layer.get_file_size());
        evicted_amount += 1;
    }

    VictimSelection {
        amount: evicted_amount,
        usage_pre,
        usage_when_switched,
        usage_planned,
    }
}

struct VictimSelection<U> {
    amount: usize,
    usage_pre: U,
    usage_when_switched: Option<(U, usize)>,
    usage_planned: U,
}

impl<U: Usage> VictimSelection<U> {
    fn into_amount_and_planned(self) -> (usize, PlannedUsage<U>) {
        debug!(
            evicted_amount=%self.amount,
            "took enough candidates for pressure to be relieved"
        );

        if let Some((usage_planned, candidate_no)) = self.usage_when_switched.as_ref() {
            warn!(usage_pre=?self.usage_pre, ?usage_planned, candidate_no, "tenant_min_resident_size-respecting LRU would not relieve pressure, evicting more following global LRU policy");
        }

        let planned = match self.usage_when_switched {
            Some((respecting_tenant_min_resident_size, _)) => PlannedUsage {
                respecting_tenant_min_resident_size,
                fallback_to_global_lru: Some(self.usage_planned),
            },
            None => PlannedUsage {
                respecting_tenant_min_resident_size: self.usage_planned,
                fallback_to_global_lru: None,
            },
        };

        (self.amount, planned)
    }
}

struct TimelineKey(Arc<Timeline>);

impl PartialEq for TimelineKey {
    fn eq(&self, other: &Self) -> bool {
        Arc::ptr_eq(&self.0, &other.0)
    }
}

impl Eq for TimelineKey {}

impl std::hash::Hash for TimelineKey {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        Arc::as_ptr(&self.0).hash(state);
    }
}

impl std::ops::Deref for TimelineKey {
    type Target = Timeline;

    fn deref(&self) -> &Self::Target {
        self.0.as_ref()
    }
}

/// A totally ordered f32 subset we can use with sorting functions.
pub(crate) mod finite_f32 {

    /// A totally ordered f32 subset we can use with sorting functions.
    #[derive(Clone, Copy, PartialEq)]
    pub struct FiniteF32(f32);

    impl std::fmt::Debug for FiniteF32 {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            std::fmt::Debug::fmt(&self.0, f)
        }
    }

    impl std::fmt::Display for FiniteF32 {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            std::fmt::Display::fmt(&self.0, f)
        }
    }

    impl std::cmp::Eq for FiniteF32 {}

    impl std::cmp::PartialOrd for FiniteF32 {
        fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
            Some(self.cmp(other))
        }
    }

    impl std::cmp::Ord for FiniteF32 {
        fn cmp(&self, other: &Self) -> std::cmp::Ordering {
            self.0.total_cmp(&other.0)
        }
    }

    impl TryFrom<f32> for FiniteF32 {
        type Error = f32;

        fn try_from(value: f32) -> Result<Self, Self::Error> {
            if value.is_finite() {
                Ok(FiniteF32(value))
            } else {
                Err(value)
            }
        }
    }

    impl From<FiniteF32> for f32 {
        fn from(value: FiniteF32) -> f32 {
            value.0
        }
    }

    impl FiniteF32 {
        pub const ZERO: FiniteF32 = FiniteF32(0.0);

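        /// Accepts values in `0.0..=1.0` only, normalizing `-0.0` to `0.0`; anything else
        /// (including NaN and infinities) is returned as the error value.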
        pub fn try_from_normalized(value: f32) -> Result<Self, f32> {
            if (0.0..=1.0).contains(&value) {
                // -0.0 is within the range, make sure it is assumed 0.0..=1.0
                let value = value.abs();
                Ok(FiniteF32(value))
            } else {
                Err(value)
            }
        }

        pub fn into_inner(self) -> f32 {
            self.into()
        }
    }
}

mod filesystem_level_usage {
    use anyhow::Context;
    use camino::Utf8Path;

    use crate::statvfs::Statvfs;

    use super::DiskUsageEvictionTaskConfig;

    #[derive(Debug, Clone, Copy)]
    #[allow(dead_code)]
    pub struct Usage<'a> {
        config: &'a DiskUsageEvictionTaskConfig,

        /// Filesystem capacity
        total_bytes: u64,
        /// Free filesystem space
        avail_bytes: u64,
    }

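    // Worked example (hypothetical numbers, mirroring the test below): with
    // total_bytes = 100_000, avail_bytes = 15_000, and max_usage_pct = 85, usage_pct
    // computes to 85, so the `max_usage_pct` threshold is hit and pressure is reported.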
    impl super::Usage for Usage<'_> {
        fn has_pressure(&self) -> bool {
            let usage_pct =
                (100.0 * (1.0 - ((self.avail_bytes as f64) / (self.total_bytes as f64)))) as u64;

            let pressures = [
                (
                    "min_avail_bytes",
                    self.avail_bytes < self.config.min_avail_bytes,
                ),
                (
                    "max_usage_pct",
                    usage_pct >= self.config.max_usage_pct.get() as u64,
                ),
            ];

            pressures.into_iter().any(|(_, has_pressure)| has_pressure)
        }

        fn add_available_bytes(&mut self, bytes: u64) {
            self.avail_bytes += bytes;
        }
    }

    pub fn get<'a>(
        tenants_dir: &Utf8Path,
        config: &'a DiskUsageEvictionTaskConfig,
    ) -> anyhow::Result<Usage<'a>> {
        let mock_config = {
            #[cfg(feature = "testing")]
            {
                config.mock_statvfs.as_ref()
            }
            #[cfg(not(feature = "testing"))]
            {
                None
            }
        };

        let stat = Statvfs::get(tenants_dir, mock_config)
            .context("statvfs failed, presumably directory got unlinked")?;

        // https://unix.stackexchange.com/a/703650
        let blocksize = if stat.fragment_size() > 0 {
            stat.fragment_size()
        } else {
            stat.block_size()
        };

        // use blocks_available (b_avail) since the pageserver runs as an unprivileged user
        let avail_bytes = stat.blocks_available() * blocksize;
        let total_bytes = stat.blocks() * blocksize;

        Ok(Usage {
            config,
            total_bytes,
            avail_bytes,
        })
    }

    #[test]
    fn max_usage_pct_pressure() {
        use super::EvictionOrder;
        use super::Usage as _;
        use std::time::Duration;
        use utils::serde_percent::Percent;

        let mut usage = Usage {
            config: &DiskUsageEvictionTaskConfig {
                max_usage_pct: Percent::new(85).unwrap(),
                min_avail_bytes: 0,
                period: Duration::MAX,
                #[cfg(feature = "testing")]
                mock_statvfs: None,
                eviction_order: EvictionOrder::default(),
            },
            total_bytes: 100_000,
            avail_bytes: 0,
        };

        assert!(usage.has_pressure(), "expected pressure at 100%");

        usage.add_available_bytes(14_000);
        assert!(usage.has_pressure(), "expected pressure at 86%");

        usage.add_available_bytes(999);
        assert!(usage.has_pressure(), "expected pressure at 85.001%");

        usage.add_available_bytes(1);
        assert!(usage.has_pressure(), "expected pressure at precisely 85%");

        usage.add_available_bytes(1);
        assert!(!usage.has_pressure(), "no pressure at 84.999%");

        usage.add_available_bytes(999);
        assert!(!usage.has_pressure(), "no pressure at 84%");

        usage.add_available_bytes(16_000);
        assert!(!usage.has_pressure());
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn relative_equal_bounds() {
        let order = EvictionOrder::RelativeAccessed {
            highest_layer_count_loses_first: false,
        };

        let len = 10;
        let v = (0..len)
            .map(|i| order.relative_last_activity(len, i).into_inner())
            .collect::<Vec<_>>();

        assert_eq!(v.first(), Some(&1.0));
        assert_eq!(v.last(), Some(&0.0));
        assert!(v.windows(2).all(|slice| slice[0] > slice[1]));
    }

    #[test]
    fn relative_spare_bounds() {
        let order = EvictionOrder::RelativeAccessed {
            highest_layer_count_loses_first: true,
        };

        let len = 10;
        let v = (0..len)
            .map(|i| order.relative_last_activity(len, i).into_inner())
            .collect::<Vec<_>>();

        assert_eq!(v.first(), Some(&1.0));
        assert_eq!(v.last(), Some(&0.1));
        assert!(v.windows(2).all(|slice| slice[0] > slice[1]));
    }
}