use std::time::UNIX_EPOCH;

use pageserver_api::key::{CONTROLFILE_KEY, Key};
use tokio::task::JoinSet;
use utils::completion::{self, Completion};
use utils::id::TimelineId;

use super::failpoints::{Failpoint, FailpointKind};
use super::*;
use crate::context::DownloadBehavior;
use crate::tenant::harness::{TenantHarness, test_img};
use crate::tenant::storage_layer::{IoConcurrency, LayerVisibilityHint};

/// Used in tests to advance a future to the wanted await point, and not further.
const ADVANCE: std::time::Duration = std::time::Duration::from_secs(3600);

/// Used in tests to indicate a forever-long timeout; has to be longer than the amount of ADVANCE
/// that timeouts use to advance futures.
const FOREVER: std::time::Duration = std::time::Duration::from_secs(ADVANCE.as_secs() * 24 * 7);

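// A minimal sketch (not part of the original suite) of how ADVANCE and FOREVER interact under
// tokio's paused clock: when the runtime is idle it auto-advances time, so a future that is
// genuinely blocked past ADVANCE times out deterministically, without any real waiting.
#[tokio::test(start_paused = true)]
async fn advance_vs_forever_sketch() {
    // FOREVER outlasts ADVANCE by construction, so the timeout always fires first.
    let res = tokio::time::timeout(ADVANCE, tokio::time::sleep(FOREVER)).await;
    assert!(res.is_err(), "a FOREVER sleep must not complete within ADVANCE");
}
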
/// Demonstrate the API and the resident -> evicted -> resident -> deleted transitions.
#[tokio::test]
async fn smoke_test() {
    let handle = tokio::runtime::Handle::current();

    let h = TenantHarness::create("smoke_test").await.unwrap();
    let span = h.span();
    let download_span = span.in_scope(|| tracing::info_span!("downloading", timeline_id = 1));
    let (tenant, ctx) = h.load().await;
    let io_concurrency = IoConcurrency::spawn_for_test();

    let image_layers = vec![(
        Lsn(0x40),
        vec![(
            Key::from_hex("620000000033333333444444445500000000").unwrap(),
            test_img("foo"),
        )],
    )];

    // Create a test timeline with one real layer, and one synthetic test layer.  The synthetic
    // one is only there so that we can GC the real one without leaving the timeline's metadata
    // empty, which is an illegal state (see [`IndexPart::validate`]).
    let timeline = tenant
        .create_test_timeline_with_layers(
            TimelineId::generate(),
            Lsn(0x10),
            14,
            &ctx,
            Default::default(), // in-memory layers
            Default::default(),
            image_layers,
            Lsn(0x100),
        )
        .await
        .unwrap();
    let ctx = &ctx.with_scope_timeline(&timeline);

    // Grab one of the timeline's layers to exercise in the test, and the other layer that is
    // just there to avoid the timeline being illegally empty
    let (layer, dummy_layer) = {
        let mut layers = {
            let layers = timeline.layers.read().await;
            layers.likely_resident_layers().cloned().collect::<Vec<_>>()
        };

        assert_eq!(layers.len(), 2);

        layers.sort_by_key(|l| l.layer_desc().get_key_range().start);
        let synthetic_layer = layers.pop().unwrap();
        let real_layer = layers.pop().unwrap();
        tracing::info!(
            "real_layer={:?} ({}), synthetic_layer={:?} ({})",
            real_layer,
            real_layer.layer_desc().file_size,
            synthetic_layer,
            synthetic_layer.layer_desc().file_size
        );
        (real_layer, synthetic_layer)
    };

    // all layers created at the pageserver are like `layer`, initialized with a strong
    // Arc<DownloadedLayer>.

    let controlfile_keyspace = KeySpace {
        ranges: vec![CONTROLFILE_KEY..CONTROLFILE_KEY.next()],
    };

      82           12 :     // Arc<DownloadedLayer>.
      83           12 : 
      84           12 :     let controlfile_keyspace = KeySpace {
      85           12 :         ranges: vec![CONTROLFILE_KEY..CONTROLFILE_KEY.next()],
      86           12 :     };
      87           12 : 
      88           12 :     let img_before = {
      89           12 :         let mut data = ValuesReconstructState::new(io_concurrency.clone());
      90           12 :         layer
      91           12 :             .get_values_reconstruct_data(
      92           12 :                 controlfile_keyspace.clone(),
      93           12 :                 Lsn(0x10)..Lsn(0x11),
      94           12 :                 &mut data,
      95           12 :                 ctx,
      96           12 :             )
      97           12 :             .await
      98           12 :             .unwrap();
      99           12 : 
     100           12 :         data.keys
     101           12 :             .remove(&CONTROLFILE_KEY)
     102           12 :             .expect("must be present")
     103           12 :             .collect_pending_ios()
     104           12 :             .await
     105           12 :             .expect("must not error")
     106           12 :             .img
     107           12 :             .take()
     108           12 :             .expect("tenant harness writes the control file")
     109           12 :     };
     110           12 : 
    // The important part is evicting the layer, which can be done when there are no more
    // ResidentLayer instances -- there currently are none, only two `Layer` values, one in the
    // layermap and one in scope.
    layer.evict_and_wait(FOREVER).await.unwrap();

    // Double-eviction returns an error, which is valid if e.g. the eviction task and disk usage
    // based eviction try to evict the same layer at the same time.

    let e = layer.evict_and_wait(FOREVER).await.unwrap_err();
    assert!(matches!(e, EvictionError::NotFound));

    let dl_ctx = RequestContextBuilder::from(ctx)
        .download_behavior(DownloadBehavior::Download)
        .attached_child();

    // on access while the layer is evicted, it will automatically be downloaded.
    let img_after = {
        let mut data = ValuesReconstructState::new(io_concurrency.clone());
        layer
            .get_values_reconstruct_data(
                controlfile_keyspace.clone(),
                Lsn(0x10)..Lsn(0x11),
                &mut data,
                &dl_ctx,
            )
            .instrument(download_span.clone())
            .await
            .unwrap();
        data.keys
            .remove(&CONTROLFILE_KEY)
            .expect("must be present")
            .collect_pending_ios()
            .await
            .expect("must not error")
            .img
            .take()
            .expect("tenant harness writes the control file")
    };

    assert_eq!(img_before, img_after);

    // evict_and_wait can time out, but it doesn't cancel the eviction itself
    //
    // ZERO for timeout does not work reliably, so first take up all spawn_blocking slots to
    // artificially slow it down.
    let helper = SpawnBlockingPoolHelper::consume_all_spawn_blocking_threads(&handle).await;

    match layer
        .evict_and_wait(std::time::Duration::ZERO)
        .await
        .unwrap_err()
    {
        EvictionError::Timeout => {
            // expected, but note that the eviction is "still ongoing"
            helper.release().await;
            // exhaust the spawn_blocking pool to ensure it is now complete
            SpawnBlockingPoolHelper::consume_and_release_all_of_spawn_blocking_threads(&handle)
                .await;
        }
        other => unreachable!("{other:?}"),
    }

    // The only way to query whether a layer is resident is to acquire a ResidentLayer instance.
    // Layer::keep_resident never downloads, but it might initialize if the layer file is found
    // downloaded locally.
    let none = layer.keep_resident().await;
    assert!(
        none.is_none(),
        "Expected none, because eviction removed the local file, found: {none:?}"
    );

    // plain downloading is rarely needed
    layer
        .download_and_keep_resident(&dl_ctx)
        .instrument(download_span)
        .await
        .unwrap();

    // The last important part is deletion on drop: gc and compaction use it for compacted L0
    // layers or fully garbage collected layers. Deletion means deleting the local file, and
    // scheduling a deletion of the remote file, which has already been unlinked from
    // index_part.json.
    //
    // Marking a layer to be deleted on drop is irreversible; there is no technical reason against
    // reversibility, but currently it is not needed so it is not provided.
    layer.delete_on_drop();

    let path = layer.local_path().to_owned();

    // wait_drop produces a future, unconnected to the Layer, which will resolve when
    // LayerInner::drop has completed.
    let mut wait_drop = std::pin::pin!(layer.wait_drop());

    // paused time doesn't really work well with timeouts and evict_and_wait, so delay pausing
    // until here
    tokio::time::pause();
    tokio::time::timeout(ADVANCE, &mut wait_drop)
        .await
        .expect_err("should have timed out because two strong references exist");

    tokio::fs::metadata(&path)
        .await
        .expect("the local layer file still exists");

    let rtc = &timeline.remote_client;

    // Simulate GC removing our test layer.
    {
        let mut g = timeline.layers.write().await;

        let layers = &[layer];
        g.open_mut().unwrap().finish_gc_timeline(layers);

        // this just updates the remote_physical_size for demonstration purposes
        rtc.schedule_gc_update(layers).unwrap();
    }

    // when strong references are dropped, the file is deleted and remote deletion is scheduled
    wait_drop.await;

    let e = tokio::fs::metadata(&path)
        .await
        .expect_err("the local file is deleted");
    assert_eq!(e.kind(), std::io::ErrorKind::NotFound);

    rtc.wait_completion().await.unwrap();

    assert_eq!(
        rtc.get_remote_physical_size(),
        dummy_layer.metadata().file_size
    );
    assert_eq!(0, LAYER_IMPL_METRICS.inits_cancelled.get())
}

/// This test demonstrates a previous hang when an eviction and a deletion were requested at the
/// same time. Now both of them complete per Arc drop semantics.
#[tokio::test(start_paused = true)]
async fn evict_and_wait_on_wanted_deleted() {
    // this is the runtime on which Layer spawns the blocking tasks
    let handle = tokio::runtime::Handle::current();

    let h = TenantHarness::create("evict_and_wait_on_wanted_deleted")
        .await
        .unwrap();
    utils::logging::replace_panic_hook_with_tracing_panic_hook().forget();
    let (tenant, ctx) = h.load().await;

    let timeline = tenant
        .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx)
        .await
        .unwrap();

    let layer = {
        let mut layers = {
            let layers = timeline.layers.read().await;
            layers.likely_resident_layers().cloned().collect::<Vec<_>>()
        };

        assert_eq!(layers.len(), 1);

        layers.swap_remove(0)
    };

    // setup done

    let resident = layer.keep_resident().await.unwrap();

    {
        let mut evict_and_wait = std::pin::pin!(layer.evict_and_wait(FOREVER));

        // drive the future to await on the status channel
        tokio::time::timeout(ADVANCE, &mut evict_and_wait)
            .await
            .expect_err("should have been a timeout since we are holding the layer resident");

        layer.delete_on_drop();

        drop(resident);

        // make sure the eviction task gets to run
        SpawnBlockingPoolHelper::consume_and_release_all_of_spawn_blocking_threads(&handle).await;

        let resident = layer.keep_resident().await;
        assert!(
            resident.is_none(),
            "keep_resident should not have re-initialized: {resident:?}"
        );

        evict_and_wait
            .await
            .expect("evict_and_wait should have succeeded");

        // works as intended
    }

    // assert that once we remove the `layer` from the layer map and drop our reference,
    // the deletion of the layer in remote_storage happens.
    {
        let mut layers = timeline.layers.write().await;
        layers.open_mut().unwrap().finish_gc_timeline(&[layer]);
    }

    SpawnBlockingPoolHelper::consume_and_release_all_of_spawn_blocking_threads(&handle).await;

    assert_eq!(1, LAYER_IMPL_METRICS.started_deletes.get());
    assert_eq!(1, LAYER_IMPL_METRICS.completed_deletes.get());
    assert_eq!(1, LAYER_IMPL_METRICS.started_evictions.get());
    assert_eq!(1, LAYER_IMPL_METRICS.completed_evictions.get());
    assert_eq!(0, LAYER_IMPL_METRICS.inits_cancelled.get())
}

/// This test ensures we are able to read the layer while the layer eviction has been
/// started but not completed.
#[test]
fn read_wins_pending_eviction() {
    let rt = tokio::runtime::Builder::new_current_thread()
        .max_blocking_threads(1)
        .enable_all()
        .start_paused(true)
        .build()
        .unwrap();

    rt.block_on(async move {
        // this is the runtime on which Layer spawns the blocking tasks
        let handle = tokio::runtime::Handle::current();
        let h = TenantHarness::create("read_wins_pending_eviction")
            .await
            .unwrap();
        let (tenant, ctx) = h.load().await;
        let span = h.span();
        let download_span = span.in_scope(|| tracing::info_span!("downloading", timeline_id = 1));

        let timeline = tenant
            .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx)
            .await
            .unwrap();
        let ctx = ctx.with_scope_timeline(&timeline);

        let layer = {
            let mut layers = {
                let layers = timeline.layers.read().await;
                layers.likely_resident_layers().cloned().collect::<Vec<_>>()
            };

            assert_eq!(layers.len(), 1);

            layers.swap_remove(0)
        };

        // setup done

        let resident = layer.keep_resident().await.unwrap();

        let mut evict_and_wait = std::pin::pin!(layer.evict_and_wait(FOREVER));

        // drive the future to await on the status channel
        tokio::time::timeout(ADVANCE, &mut evict_and_wait)
            .await
            .expect_err("should have been a timeout since we are holding the layer resident");
        assert_eq!(1, LAYER_IMPL_METRICS.started_evictions.get());

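        // Completion/barrier pair: the eviction task parks on `barrier` inside the failpoint
        // until `completion` is dropped; `arrival` signals that the task has reached it.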
        let (completion, barrier) = utils::completion::channel();
        let (arrival, arrived_at_barrier) = utils::completion::channel();
        layer.enable_failpoint(Failpoint::WaitBeforeStartingEvicting(
            Some(arrival),
            barrier,
        ));

        // now the eviction cannot proceed because the threads are consumed while completion exists
        drop(resident);
        arrived_at_barrier.wait().await;
        assert!(!layer.is_likely_resident());

        // because no actual eviction happened, we get to just reinitialize the DownloadedLayer
        layer
            .0
            .get_or_maybe_download(false, &ctx)
            .instrument(download_span)
            .await
            .expect("should have reinitialized without downloading");

        assert!(layer.is_likely_resident());

        // reinitialization notifies of new resident status, which should error out all evict_and_wait
        let e = tokio::time::timeout(ADVANCE, &mut evict_and_wait)
            .await
            .expect("no timeout, because get_or_maybe_download re-initialized")
            .expect_err("eviction should not have succeeded because re-initialized");

        // works as intended: evictions lose to "downloads"
        assert!(matches!(e, EvictionError::Downloaded), "{e:?}");
        assert_eq!(0, LAYER_IMPL_METRICS.completed_evictions.get());

        // this is not wrong: the eviction is technically still "on the way" as it's still queued
        // because of a failpoint
        assert_eq!(
            0,
            LAYER_IMPL_METRICS
                .cancelled_evictions
                .values()
                .map(|ctr| ctr.get())
                .sum::<u64>()
        );

        drop(completion);

        tokio::time::sleep(ADVANCE).await;
        SpawnBlockingPoolHelper::consume_and_release_all_of_spawn_blocking_threads0(&handle, 1)
            .await;

        assert_eq!(0, LAYER_IMPL_METRICS.completed_evictions.get());

        // now we finally can observe the original eviction failing
        // it would have been possible to observe it earlier, but here it is guaranteed to have
        // happened.
        assert_eq!(
            1,
            LAYER_IMPL_METRICS
                .cancelled_evictions
                .values()
                .map(|ctr| ctr.get())
                .sum::<u64>()
        );

        assert_eq!(
            1,
            LAYER_IMPL_METRICS.cancelled_evictions[EvictionCancelled::AlreadyReinitialized].get()
        );

        assert_eq!(0, LAYER_IMPL_METRICS.inits_cancelled.get())
    });
}

/// Use a failpoint to delay an eviction starting, to get a VersionCheckFailed.
#[test]
fn multiple_pending_evictions_in_order() {
    let name = "multiple_pending_evictions_in_order";
    let in_order = true;
    multiple_pending_evictions_scenario(name, in_order);
}

/// Use a failpoint to reorder a later eviction before the first, to get an UnexpectedEvictedState.
#[test]
fn multiple_pending_evictions_out_of_order() {
    let name = "multiple_pending_evictions_out_of_order";
    let in_order = false;
    multiple_pending_evictions_scenario(name, in_order);
}

fn multiple_pending_evictions_scenario(name: &'static str, in_order: bool) {
    let rt = tokio::runtime::Builder::new_current_thread()
        .max_blocking_threads(1)
        .enable_all()
        .start_paused(true)
        .build()
        .unwrap();

    rt.block_on(async move {
        // this is the runtime on which Layer spawns the blocking tasks
        let handle = tokio::runtime::Handle::current();
        let h = TenantHarness::create(name).await.unwrap();
        let (tenant, ctx) = h.load().await;
        let span = h.span();
        let download_span = span.in_scope(|| tracing::info_span!("downloading", timeline_id = 1));

        let timeline = tenant
            .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx)
            .await
            .unwrap();
        let ctx = ctx.with_scope_timeline(&timeline);

        let layer = {
            let mut layers = {
                let layers = timeline.layers.read().await;
                layers.likely_resident_layers().cloned().collect::<Vec<_>>()
            };

            assert_eq!(layers.len(), 1);

            layers.swap_remove(0)
        };

        // setup done

        let resident = layer.keep_resident().await.unwrap();

        let mut evict_and_wait = std::pin::pin!(layer.evict_and_wait(FOREVER));

        // drive the future to await on the status channel
        tokio::time::timeout(ADVANCE, &mut evict_and_wait)
            .await
            .expect_err("should have been a timeout since we are holding the layer resident");
        assert_eq!(1, LAYER_IMPL_METRICS.started_evictions.get());

        let (completion1, barrier) = utils::completion::channel();
        let mut completion1 = Some(completion1);
        let (arrival, arrived_at_barrier) = utils::completion::channel();
        layer.enable_failpoint(Failpoint::WaitBeforeStartingEvicting(
            Some(arrival),
            barrier,
        ));

        // now the eviction cannot proceed because we are simulating an arbitrarily long delay for
        // the eviction task start.
        drop(resident);
        assert!(!layer.is_likely_resident());

        arrived_at_barrier.wait().await;

        // because no actual eviction happened, we get to just reinitialize the DownloadedLayer
        layer
            .0
            .get_or_maybe_download(false, &ctx)
            .instrument(download_span)
            .await
            .expect("should have reinitialized without downloading");

        assert!(layer.is_likely_resident());

        // reinitialization notifies of new resident status, which should error out all evict_and_wait
        let e = tokio::time::timeout(ADVANCE, &mut evict_and_wait)
            .await
            .expect("no timeout, because get_or_maybe_download re-initialized")
            .expect_err("eviction should not have succeeded because re-initialized");

        // works as intended: evictions lose to "downloads"
        assert!(matches!(e, EvictionError::Downloaded), "{e:?}");
        assert_eq!(0, LAYER_IMPL_METRICS.completed_evictions.get());

        // this is not wrong: the eviction is technically still "on the way" as it's still queued
        // because of a failpoint
        assert_eq!(
            0,
            LAYER_IMPL_METRICS
                .cancelled_evictions
                .values()
                .map(|ctr| ctr.get())
                .sum::<u64>()
        );

        assert_eq!(0, LAYER_IMPL_METRICS.completed_evictions.get());

        // configure another failpoint for the second eviction -- evictions are per initialization,
        // so now that we've reinitialized the inner, we get to run two of them at the same time.
        let (completion2, barrier) = utils::completion::channel();
        let (arrival, arrived_at_barrier) = utils::completion::channel();
        layer.enable_failpoint(Failpoint::WaitBeforeStartingEvicting(
            Some(arrival),
            barrier,
        ));

        let mut second_eviction = std::pin::pin!(layer.evict_and_wait(FOREVER));

        // advance to the wait on the queue
        tokio::time::timeout(ADVANCE, &mut second_eviction)
            .await
            .expect_err("timeout because failpoint is blocking");

        arrived_at_barrier.wait().await;

        assert_eq!(2, LAYER_IMPL_METRICS.started_evictions.get());

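        // Deferred helper: unclog the first, now-stale eviction and assert it gets cancelled
        // for `expected_reason`; run before or after the second eviction depending on `in_order`.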
        let mut release_earlier_eviction = |expected_reason| {
            assert_eq!(
                0,
                LAYER_IMPL_METRICS.cancelled_evictions[expected_reason].get(),
            );

            drop(completion1.take().unwrap());

            let handle = &handle;

            async move {
                tokio::time::sleep(ADVANCE).await;
                SpawnBlockingPoolHelper::consume_and_release_all_of_spawn_blocking_threads0(
                    handle, 1,
                )
                .await;

                assert_eq!(
                    1,
                    LAYER_IMPL_METRICS.cancelled_evictions[expected_reason].get(),
                );
            }
        };

        if in_order {
            release_earlier_eviction(EvictionCancelled::VersionCheckFailed).await;
        }

        // release the later eviction which is for the current version
        drop(completion2);
        tokio::time::sleep(ADVANCE).await;
        SpawnBlockingPoolHelper::consume_and_release_all_of_spawn_blocking_threads0(&handle, 1)
            .await;

        if !in_order {
            release_earlier_eviction(EvictionCancelled::UnexpectedEvictedState).await;
        }

        tokio::time::timeout(ADVANCE, &mut second_eviction)
            .await
            .expect("eviction goes through now that spawn_blocking is unclogged")
            .expect("eviction should succeed, because version matches");

        assert_eq!(1, LAYER_IMPL_METRICS.completed_evictions.get());

        // ensure the cancellation counts are unchanged
        assert_eq!(
            1,
            LAYER_IMPL_METRICS
                .cancelled_evictions
                .values()
                .map(|ctr| ctr.get())
                .sum::<u64>()
        );

        assert_eq!(0, LAYER_IMPL_METRICS.inits_cancelled.get())
    });
}

/// The test ensures with a failpoint that a pending eviction is not cancelled by what is currently
/// a `Layer::keep_resident` call.
///
/// This matters because cancelling the eviction would leave us in a state where the file is on
/// disk but the layer internal state says it has not been initialized. Furthermore, it allows us
/// to have a non-repairing `Layer::is_likely_resident`.
#[tokio::test(start_paused = true)]
async fn cancelled_get_or_maybe_download_does_not_cancel_eviction() {
    let handle = tokio::runtime::Handle::current();
    let h = TenantHarness::create("cancelled_get_or_maybe_download_does_not_cancel_eviction")
        .await
        .unwrap();
    let (tenant, ctx) = h.load().await;

    let timeline = tenant
        .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx)
        .await
        .unwrap();
    let ctx = ctx.with_scope_timeline(&timeline);

    // This test does downloads
    let ctx = RequestContextBuilder::from(&ctx)
        .download_behavior(DownloadBehavior::Download)
        .attached_child();

    let layer = {
        let mut layers = {
            let layers = timeline.layers.read().await;
            layers.likely_resident_layers().cloned().collect::<Vec<_>>()
        };

        assert_eq!(layers.len(), 1);

        layers.swap_remove(0)
    };

    // this failpoint will simulate the `get_or_maybe_download` becoming cancelled (by returning an
    // Err) at the right time, i.e. "during" the `LayerInner::needs_download`.
    layer.enable_failpoint(Failpoint::AfterDeterminingLayerNeedsNoDownload);

    let (completion, barrier) = utils::completion::channel();
    let (arrival, arrived_at_barrier) = utils::completion::channel();

    layer.enable_failpoint(Failpoint::WaitBeforeStartingEvicting(
        Some(arrival),
        barrier,
    ));

    tokio::time::timeout(ADVANCE, layer.evict_and_wait(FOREVER))
        .await
        .expect_err("should have advanced to waiting on the channel");

    arrived_at_barrier.wait().await;

    // simulate a read which is cancelled before it gets to re-initialize
    let e = layer
        .0
        .get_or_maybe_download(false, &ctx)
        .await
        .unwrap_err();
    assert!(
        matches!(
            e,
            DownloadError::Failpoint(FailpointKind::AfterDeterminingLayerNeedsNoDownload)
        ),
        "{e:?}"
    );

    assert!(
        layer.0.needs_download().await.unwrap().is_none(),
        "file is still on disk"
    );

    // release the eviction task
    drop(completion);
    tokio::time::sleep(ADVANCE).await;
    SpawnBlockingPoolHelper::consume_and_release_all_of_spawn_blocking_threads(&handle).await;

    // failpoint is still enabled, but it is not hit
    let e = layer
        .0
        .get_or_maybe_download(false, &ctx)
        .await
        .unwrap_err();
    assert!(matches!(e, DownloadError::DownloadRequired), "{e:?}");

    // failpoint is not counted as a cancellation either
    assert_eq!(0, LAYER_IMPL_METRICS.inits_cancelled.get())
}

#[tokio::test(start_paused = true)]
async fn evict_and_wait_does_not_wait_for_download() {
    // let handle = tokio::runtime::Handle::current();
    let h = TenantHarness::create("evict_and_wait_does_not_wait_for_download")
        .await
        .unwrap();
    let (tenant, ctx) = h.load().await;
    let span = h.span();
    let download_span = span.in_scope(|| tracing::info_span!("downloading", timeline_id = 1));

    let timeline = tenant
        .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx)
        .await
        .unwrap();
    let ctx = ctx.with_scope_timeline(&timeline);

    // This test does downloads
    let ctx = RequestContextBuilder::from(&ctx)
        .download_behavior(DownloadBehavior::Download)
        .attached_child();

    let layer = {
        let mut layers = {
            let layers = timeline.layers.read().await;
            layers.likely_resident_layers().cloned().collect::<Vec<_>>()
        };

        assert_eq!(layers.len(), 1);

        layers.swap_remove(0)
    };

    // kind of forced setup: start an eviction but do not allow it to progress until we are
    // downloading
    let (eviction_can_continue, barrier) = utils::completion::channel();
    let (arrival, eviction_arrived) = utils::completion::channel();
    layer.enable_failpoint(Failpoint::WaitBeforeStartingEvicting(
        Some(arrival),
        barrier,
    ));

    let mut evict_and_wait = std::pin::pin!(layer.evict_and_wait(FOREVER));

    // use this once-awaited other_evict to synchronize with the eviction
    let other_evict = layer.evict_and_wait(FOREVER);

    tokio::time::timeout(ADVANCE, &mut evict_and_wait)
        .await
        .expect_err("should have advanced");
    eviction_arrived.wait().await;
    drop(eviction_can_continue);
    other_evict.await.unwrap();

    // now the layer is evicted, and the "evict_and_wait" is waiting on the receiver
    assert!(!layer.is_likely_resident());

    // a following new evict_and_wait will fail until we've completed the download
    let e = layer.evict_and_wait(FOREVER).await.unwrap_err();
    assert!(matches!(e, EvictionError::NotFound), "{e:?}");

    let (download_can_continue, barrier) = utils::completion::channel();
    let (arrival, _download_arrived) = utils::completion::channel();
    layer.enable_failpoint(Failpoint::WaitBeforeDownloading(Some(arrival), barrier));

    let mut download = std::pin::pin!(
        layer
            .0
            .get_or_maybe_download(true, &ctx)
            .instrument(download_span)
    );

    assert!(
        !layer.is_likely_resident(),
        "during download layer is evicted"
    );

    tokio::time::timeout(ADVANCE, &mut download)
        .await
        .expect_err("should have timed out because of the failpoint");

    // now we finally get to continue, and because the latest state is downloading, we deduce that
    // the original eviction succeeded
    evict_and_wait.await.unwrap();

    // however a new evict_and_wait will fail
    let e = layer.evict_and_wait(FOREVER).await.unwrap_err();
    assert!(matches!(e, EvictionError::NotFound), "{e:?}");

    assert!(!layer.is_likely_resident());

    drop(download_can_continue);
    download.await.expect("download should have succeeded");
    assert!(layer.is_likely_resident());

    // only now can we evict
    layer.evict_and_wait(FOREVER).await.unwrap();
}

/// Asserts that there is no miscalculation when a Layer is dropped while it is being kept
/// resident, and the resident handle is the last value keeping it alive.
///
/// Also checks that the same does not happen on a non-evicted layer (regression test).
#[tokio::test(start_paused = true)]
async fn eviction_cancellation_on_drop() {
    use bytes::Bytes;
    use pageserver_api::value::Value;

    // this is the runtime on which Layer spawns the blocking tasks
    let handle = tokio::runtime::Handle::current();

    let h = TenantHarness::create("eviction_cancellation_on_drop")
        .await
        .unwrap();
    utils::logging::replace_panic_hook_with_tracing_panic_hook().forget();
    let (tenant, ctx) = h.load().await;

    let timeline = tenant
        .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx)
        .await
        .unwrap();

    {
        // create_test_timeline wrote us one layer, write another
        let mut writer = timeline.writer().await;
        writer
            .put(
                pageserver_api::key::Key::from_i128(5),
                Lsn(0x20),
                &Value::Image(Bytes::from_static(b"this does not matter either")),
                &ctx,
            )
            .await
            .unwrap();

        writer.finish_write(Lsn(0x20));
    }

    timeline.freeze_and_flush().await.unwrap();

    // wait for the upload to complete so our Arc::strong_count assertion holds
    timeline.remote_client.wait_completion().await.unwrap();

    let (evicted_layer, not_evicted) = {
        let mut layers = {
            let mut guard = timeline.layers.write().await;
            let layers = guard.likely_resident_layers().cloned().collect::<Vec<_>>();
            // remove the layers from the layermap
            guard.open_mut().unwrap().finish_gc_timeline(&layers);

            layers
        };

        assert_eq!(layers.len(), 2);

        (layers.pop().unwrap(), layers.pop().unwrap())
    };

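    // Exercise both variants: for the first victim an eviction is started before the last
    // reference is dropped, for the second it is not; only the former should record an
    // `EvictionCancelled::LayerGone`.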
    let victims = [(evicted_layer, true), (not_evicted, false)];

    for (victim, evict) in victims {
        let resident = victim.keep_resident().await.unwrap();
        drop(victim);

        assert_eq!(Arc::strong_count(&resident.owner.0), 1);

        if evict {
            let evict_and_wait = resident.owner.evict_and_wait(FOREVER);

            // drive the future to await on the status channel, and then drop it
            tokio::time::timeout(ADVANCE, evict_and_wait)
                .await
                .expect_err("should have been a timeout since we are holding the layer resident");
        }

        // 1 == we only evict one of the layers
        assert_eq!(1, LAYER_IMPL_METRICS.started_evictions.get());

        drop(resident);

        // run any spawned tasks
        tokio::time::sleep(ADVANCE).await;

        SpawnBlockingPoolHelper::consume_and_release_all_of_spawn_blocking_threads(&handle).await;

        assert_eq!(
            1,
            LAYER_IMPL_METRICS.cancelled_evictions[EvictionCancelled::LayerGone].get()
        );
    }
}

/// A test case to remind you of the cost of these structures. You can bump the size limit
/// below if it is really necessary to add more fields to the structures.
#[test]
#[cfg(target_arch = "x86_64")]
fn layer_size() {
    assert_eq!(size_of::<LayerAccessStats>(), 8);
    assert_eq!(size_of::<PersistentLayerDesc>(), 104);
    assert_eq!(size_of::<LayerInner>(), 296);
    // it also has the utf8 path
}

struct SpawnBlockingPoolHelper {
    awaited_by_spawn_blocking_tasks: Completion,
    blocking_tasks: JoinSet<()>,
}

impl SpawnBlockingPoolHelper {
    /// All `crate::task_mgr::BACKGROUND_RUNTIME` spawn_blocking threads will be consumed until
    /// release is called.
    ///
    /// In the tests this can be used to ensure something cannot be started on the target
    /// runtime's spawn_blocking pool.
    ///
    /// This should be no issue nowadays, because nextest runs each test in its own process.
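    ///
    /// A minimal usage sketch (for illustration only, not taken from a caller):
    ///
    /// ```ignore
    /// let helper = SpawnBlockingPoolHelper::consume_all_spawn_blocking_threads(&handle).await;
    /// // ... nothing else can run on this runtime's spawn_blocking pool here ...
    /// helper.release().await;
    /// ```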
    async fn consume_all_spawn_blocking_threads(handle: &tokio::runtime::Handle) -> Self {
        let default_max_blocking_threads = 512;

        Self::consume_all_spawn_blocking_threads0(handle, default_max_blocking_threads).await
    }

     942          156 :     async fn consume_all_spawn_blocking_threads0(
     943          156 :         handle: &tokio::runtime::Handle,
     944          156 :         threads: usize,
     945          156 :     ) -> Self {
     946          156 :         assert_ne!(threads, 0);
     947              : 
     948          156 :         let (completion, barrier) = completion::channel();
     949          156 :         let (started, starts_completed) = completion::channel();
     950          156 : 
     951          156 :         let mut blocking_tasks = JoinSet::new();
     952          156 : 
     953        43080 :         for _ in 0..threads {
     954        43080 :             let barrier = barrier.clone();
     955        43080 :             let started = started.clone();
     956        43080 :             blocking_tasks.spawn_blocking_on(
     957        43080 :                 move || {
     958        43080 :                     drop(started);
     959        43080 :                     tokio::runtime::Handle::current().block_on(barrier.wait());
     960        43080 :                 },
     961        43080 :                 handle,
     962        43080 :             );
     963        43080 :         }
     964              : 
     965          156 :         drop(started);
     966          156 : 
     967          156 :         starts_completed.wait().await;
     968              : 
     969          156 :         drop(barrier);
     970          156 : 
     971          156 :         tracing::trace!("consumed all threads");
     972              : 
     973          156 :         SpawnBlockingPoolHelper {
     974          156 :             awaited_by_spawn_blocking_tasks: completion,
     975          156 :             blocking_tasks,
     976          156 :         }
     977          156 :     }
     978              : 
     979              :     /// Release all previously blocked spawn_blocking threads
     980          156 :     async fn release(self) {
     981          156 :         let SpawnBlockingPoolHelper {
     982          156 :             awaited_by_spawn_blocking_tasks,
     983          156 :             mut blocking_tasks,
     984          156 :         } = self;
     985          156 : 
     986          156 :         drop(awaited_by_spawn_blocking_tasks);
     987              : 
     988        43236 :         while let Some(res) = blocking_tasks.join_next().await {
     989        43080 :             res.expect("none of the tasks should had panicked");
     990        43080 :         }
     991              : 
     992          156 :         tracing::trace!("released all threads");
     993          156 :     }
     994              : 
     995              :     /// In the tests it is used as an easy way of making sure something scheduled on the target
      996              :     /// runtime's `spawn_blocking` pool has completed, because it must have been scheduled and completed
     997              :     /// before our tasks have a chance to schedule and complete.
     998           72 :     async fn consume_and_release_all_of_spawn_blocking_threads(handle: &tokio::runtime::Handle) {
     999           72 :         Self::consume_and_release_all_of_spawn_blocking_threads0(handle, 512).await
    1000           72 :     }
    1001              : 
    1002          132 :     async fn consume_and_release_all_of_spawn_blocking_threads0(
    1003          132 :         handle: &tokio::runtime::Handle,
    1004          132 :         threads: usize,
    1005          132 :     ) {
    1006          132 :         Self::consume_all_spawn_blocking_threads0(handle, threads)
    1007          132 :             .await
    1008          132 :             .release()
    1009          132 :             .await
    1010          132 :     }
    1011              : }
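
// A minimal sketch of the completion/barrier pattern the helper relies on, built on
// `tokio::sync::watch` instead of the internal `utils::completion` crate (assumed
// equivalent behavior: dropping the sender side releases every waiter at once).
// `completion_sketch` is a hypothetical name, not part of the original source.
#[allow(dead_code)]
async fn completion_sketch() {
    let (tx, rx) = tokio::sync::watch::channel(());
    let mut waiters = tokio::task::JoinSet::new();
    for _ in 0..4 {
        let mut rx = rx.clone();
        waiters.spawn(async move {
            // `changed()` returns Err once the sender is dropped; either outcome unblocks us.
            let _ = rx.changed().await;
        });
    }
    drop(tx); // release all waiters at once, like dropping `awaited_by_spawn_blocking_tasks`
    while let Some(res) = waiters.join_next().await {
        res.expect("none of the waiters should have panicked");
    }
}
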
    1012              : 
    1013              : #[test]
    1014           12 : fn spawn_blocking_pool_helper_actually_works() {
     1015           12 :     // Create a custom runtime for which we know and control how many blocking threads it has.
     1016           12 :     //
     1017           12 :     // The plain helper is not configurable and assumes the tokio default of 512 blocking
     1018           12 :     // threads (the amount BACKGROUND_RUNTIME would have); here we configure exactly one.
    1019           12 :     let rt = tokio::runtime::Builder::new_current_thread()
    1020           12 :         .max_blocking_threads(1)
    1021           12 :         .enable_all()
    1022           12 :         .build()
    1023           12 :         .unwrap();
    1024           12 : 
    1025           12 :     let handle = rt.handle();
    1026           12 : 
    1027           12 :     rt.block_on(async move {
    1028              :         // this will not return until all threads are spun up and actually executing the code
     1029              :         // waiting for `consumed` to be released via `SpawnBlockingPoolHelper::release`.
    1030           12 :         let consumed =
    1031           12 :             SpawnBlockingPoolHelper::consume_all_spawn_blocking_threads0(handle, 1).await;
    1032              : 
    1033           12 :         println!("consumed");
    1034           12 : 
    1035           12 :         let mut jh = std::pin::pin!(tokio::task::spawn_blocking(move || {
    1036           12 :             // this will not get to run before we release
    1037           12 :         }));
    1038           12 : 
    1039           12 :         println!("spawned");
    1040           12 : 
    1041           12 :         tokio::time::timeout(std::time::Duration::from_secs(1), &mut jh)
    1042           12 :             .await
    1043           12 :             .expect_err("the task should not have gotten to run yet");
    1044           12 : 
    1045           12 :         println!("tried to join");
    1046           12 : 
    1047           12 :         consumed.release().await;
    1048              : 
    1049           12 :         println!("released");
    1050           12 : 
    1051           12 :         tokio::time::timeout(std::time::Duration::from_secs(1), jh)
    1052           12 :             .await
    1053           12 :             .expect("no timeout")
    1054           12 :             .expect("no join error");
    1055           12 : 
    1056           12 :         println!("joined");
    1057           12 :     });
    1058           12 : }
    1059              : 
     1060              : /// Truncate a time to whole seconds, emulating the sub-second precision loss in LayerAccessStats
    1061           48 : fn lowres_time(hires: SystemTime) -> SystemTime {
    1062           48 :     let ts = hires.duration_since(UNIX_EPOCH).unwrap().as_secs();
    1063           48 :     UNIX_EPOCH + Duration::from_secs(ts)
    1064           48 : }
    1065              : 
    1066              : #[test]
    1067           12 : fn access_stats() {
    1068           12 :     let access_stats = LayerAccessStats::default();
    1069           12 :     // Default is visible
    1070           12 :     assert_eq!(access_stats.visibility(), LayerVisibilityHint::Visible);
    1071              : 
    1072           12 :     access_stats.set_visibility(LayerVisibilityHint::Covered);
    1073           12 :     assert_eq!(access_stats.visibility(), LayerVisibilityHint::Covered);
    1074           12 :     access_stats.set_visibility(LayerVisibilityHint::Visible);
    1075           12 :     assert_eq!(access_stats.visibility(), LayerVisibilityHint::Visible);
    1076              : 
    1077           12 :     let rtime = UNIX_EPOCH + Duration::from_secs(2000000000);
    1078           12 :     access_stats.record_residence_event_at(rtime);
    1079           12 :     assert_eq!(access_stats.latest_activity(), lowres_time(rtime));
    1080              : 
    1081           12 :     let atime = UNIX_EPOCH + Duration::from_secs(2100000000);
    1082           12 :     access_stats.record_access_at(atime);
    1083           12 :     assert_eq!(access_stats.latest_activity(), lowres_time(atime));
    1084              : 
    1085              :     // Setting visibility doesn't clobber access time
    1086           12 :     access_stats.set_visibility(LayerVisibilityHint::Covered);
    1087           12 :     assert_eq!(access_stats.latest_activity(), lowres_time(atime));
    1088           12 :     access_stats.set_visibility(LayerVisibilityHint::Visible);
    1089           12 :     assert_eq!(access_stats.latest_activity(), lowres_time(atime));
    1090              : 
    1091              :     // Recording access implicitly makes layer visible, if it wasn't already
    1092           12 :     let atime = UNIX_EPOCH + Duration::from_secs(2200000000);
    1093           12 :     access_stats.set_visibility(LayerVisibilityHint::Covered);
    1094           12 :     assert_eq!(access_stats.visibility(), LayerVisibilityHint::Covered);
    1095           12 :     assert!(access_stats.record_access_at(atime));
    1096           12 :     access_stats.set_visibility(LayerVisibilityHint::Visible);
    1097           12 :     assert!(!access_stats.record_access_at(atime));
    1098           12 :     access_stats.set_visibility(LayerVisibilityHint::Visible);
    1099           12 : }
    1100              : 
    1101              : #[test]
    1102           12 : fn access_stats_2038() {
    1103           12 :     // The access stats structure uses a timestamp representation that will run out
    1104           12 :     // of bits in 2038.  One year before that, this unit test will start failing.
    1105           12 : 
    1106           12 :     let one_year_from_now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap()
    1107           12 :         + Duration::from_secs(3600 * 24 * 365);
    1108           12 : 
     1109           12 :     assert!(one_year_from_now.as_secs() < (1 << 31)); // 2^31 seconds lands in January 2038
    1110           12 : }
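
// An illustrative sketch of why a packed timestamp runs out in 2038 (an assumed layout,
// not necessarily the actual `LayerAccessStats` encoding): storing epoch seconds in the
// low 31 bits of a u64 overflows on 2038-01-19, once the seconds exceed 2^31 - 1.
// `pack_timestamp_sketch` is a hypothetical helper, not part of the original source.
#[allow(dead_code)]
fn pack_timestamp_sketch(ts: SystemTime, flags: u64) -> u64 {
    const TS_BITS: u32 = 31;
    const TS_MASK: u64 = (1 << TS_BITS) - 1;
    let secs = ts.duration_since(UNIX_EPOCH).unwrap().as_secs();
    assert!(secs <= TS_MASK, "timestamp no longer fits in {TS_BITS} bits");
    (flags << TS_BITS) | secs
}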
        

Generated by: LCOV version 2.1-beta