LCOV - code coverage report
Current view: top level - pageserver/src/tenant - timeline.rs (source / functions) Coverage Total Hit
Test: 496e96cdfff2df79370229591d6427cda12fde29.info Lines: 59.1 % 3148 1861
Test Date: 2024-05-21 18:28:29 Functions: 56.5 % 299 169

            Line data    Source code
       1              : mod compaction;
       2              : pub mod delete;
       3              : pub(crate) mod detach_ancestor;
       4              : mod eviction_task;
       5              : mod init;
       6              : pub mod layer_manager;
       7              : pub(crate) mod logical_size;
       8              : pub mod span;
       9              : pub mod uninit;
      10              : mod walreceiver;
      11              : 
      12              : use anyhow::{anyhow, bail, ensure, Context, Result};
      13              : use arc_swap::ArcSwap;
      14              : use bytes::Bytes;
      15              : use camino::Utf8Path;
      16              : use enumset::EnumSet;
      17              : use fail::fail_point;
      18              : use once_cell::sync::Lazy;
      19              : use pageserver_api::{
      20              :     key::{
      21              :         AUX_FILES_KEY, KEY_SIZE, METADATA_KEY_BEGIN_PREFIX, METADATA_KEY_END_PREFIX,
      22              :         NON_INHERITED_RANGE, NON_INHERITED_SPARSE_RANGE,
      23              :     },
      24              :     keyspace::{KeySpaceAccum, KeySpaceRandomAccum, SparseKeyPartitioning},
      25              :     models::{
      26              :         AtomicAuxFilePolicy, AuxFilePolicy, CompactionAlgorithm, DownloadRemoteLayersTaskInfo,
      27              :         DownloadRemoteLayersTaskSpawnRequest, EvictionPolicy, InMemoryLayerInfo, LayerMapInfo,
      28              :         TimelineState,
      29              :     },
      30              :     reltag::BlockNumber,
      31              :     shard::{ShardIdentity, ShardNumber, TenantShardId},
      32              : };
      33              : use rand::Rng;
      34              : use serde_with::serde_as;
      35              : use storage_broker::BrokerClientChannel;
      36              : use tokio::{
      37              :     runtime::Handle,
      38              :     sync::{oneshot, watch},
      39              : };
      40              : use tokio_util::sync::CancellationToken;
      41              : use tracing::*;
      42              : use utils::{
      43              :     bin_ser::BeSer,
      44              :     sync::gate::{Gate, GateGuard},
      45              :     vec_map::VecMap,
      46              : };
      47              : 
      48              : use std::ops::{Deref, Range};
      49              : use std::pin::pin;
      50              : use std::sync::atomic::Ordering as AtomicOrdering;
      51              : use std::sync::{Arc, Mutex, RwLock, Weak};
      52              : use std::time::{Duration, Instant, SystemTime};
      53              : use std::{
      54              :     array,
      55              :     collections::{BTreeMap, HashMap, HashSet},
      56              :     sync::atomic::AtomicU64,
      57              : };
      58              : use std::{
      59              :     cmp::{max, min, Ordering},
      60              :     ops::ControlFlow,
      61              : };
      62              : 
      63              : use crate::{
      64              :     aux_file::AuxFileSizeEstimator,
      65              :     tenant::{
      66              :         layer_map::{LayerMap, SearchResult},
      67              :         metadata::TimelineMetadata,
      68              :     },
      69              : };
      70              : use crate::{
      71              :     context::{DownloadBehavior, RequestContext},
      72              :     disk_usage_eviction_task::DiskUsageEvictionInfo,
      73              :     pgdatadir_mapping::CollectKeySpaceError,
      74              : };
      75              : use crate::{deletion_queue::DeletionQueueClient, metrics::GetKind};
      76              : use crate::{
      77              :     disk_usage_eviction_task::finite_f32,
      78              :     tenant::storage_layer::{
      79              :         AsLayerDesc, DeltaLayerWriter, EvictionError, ImageLayerWriter, InMemoryLayer, Layer,
      80              :         LayerAccessStatsReset, LayerName, ResidentLayer, ValueReconstructResult,
      81              :         ValueReconstructState, ValuesReconstructState,
      82              :     },
      83              : };
      84              : use crate::{
      85              :     disk_usage_eviction_task::EvictionCandidate, tenant::storage_layer::delta_layer::DeltaEntry,
      86              : };
      87              : use crate::{
      88              :     metrics::ScanLatencyOngoingRecording, tenant::timeline::logical_size::CurrentLogicalSize,
      89              : };
      90              : use crate::{pgdatadir_mapping::LsnForTimestamp, tenant::tasks::BackgroundLoopKind};
      91              : use crate::{
      92              :     pgdatadir_mapping::MAX_AUX_FILE_V2_DELTAS, tenant::timeline::init::LocalLayerFileMetadata,
      93              : };
      94              : use crate::{
      95              :     pgdatadir_mapping::{AuxFilesDirectory, DirectoryKind},
      96              :     virtual_file::{MaybeFatalIo, VirtualFile},
      97              : };
      98              : 
      99              : use crate::config::PageServerConf;
     100              : use crate::keyspace::{KeyPartitioning, KeySpace};
     101              : use crate::metrics::{
     102              :     TimelineMetrics, MATERIALIZED_PAGE_CACHE_HIT, MATERIALIZED_PAGE_CACHE_HIT_DIRECT,
     103              : };
     104              : use crate::pgdatadir_mapping::CalculateLogicalSizeError;
     105              : use crate::tenant::config::TenantConfOpt;
     106              : use pageserver_api::key::{is_inherited_key, is_rel_fsm_block_key, is_rel_vm_block_key};
     107              : use pageserver_api::reltag::RelTag;
     108              : use pageserver_api::shard::ShardIndex;
     109              : 
     110              : use postgres_connection::PgConnectionConfig;
     111              : use postgres_ffi::to_pg_timestamp;
     112              : use utils::{
     113              :     completion,
     114              :     generation::Generation,
     115              :     id::TimelineId,
     116              :     lsn::{AtomicLsn, Lsn, RecordLsn},
     117              :     seqwait::SeqWait,
     118              :     simple_rcu::{Rcu, RcuReadGuard},
     119              : };
     120              : 
     121              : use crate::page_cache;
     122              : use crate::repository::GcResult;
     123              : use crate::repository::{Key, Value};
     124              : use crate::task_mgr;
     125              : use crate::task_mgr::TaskKind;
     126              : use crate::ZERO_PAGE;
     127              : 
     128              : use self::delete::DeleteTimelineFlow;
     129              : pub(super) use self::eviction_task::EvictionTaskTenantState;
     130              : use self::eviction_task::EvictionTaskTimelineState;
     131              : use self::layer_manager::LayerManager;
     132              : use self::logical_size::LogicalSize;
     133              : use self::walreceiver::{WalReceiver, WalReceiverConf};
     134              : 
     135              : use super::secondary::heatmap::{HeatMapLayer, HeatMapTimeline};
     136              : use super::{config::TenantConf, storage_layer::VectoredValueReconstructState};
     137              : use super::{debug_assert_current_span_has_tenant_and_timeline_id, AttachedTenantConf};
     138              : use super::{remote_timeline_client::index::IndexPart, storage_layer::LayerFringe};
     139              : use super::{remote_timeline_client::RemoteTimelineClient, storage_layer::ReadableLayer};
     140              : 
     141              : #[derive(Debug, PartialEq, Eq, Clone, Copy)]
     142              : pub(super) enum FlushLoopState {
     143              :     NotStarted,
     144              :     Running {
     145              :         #[cfg(test)]
     146              :         expect_initdb_optimization: bool,
     147              :         #[cfg(test)]
     148              :         initdb_optimization_count: usize,
     149              :     },
     150              :     Exited,
     151              : }
     152              : 
     153              : #[derive(Debug, Copy, Clone, PartialEq, Eq)]
     154              : pub enum ImageLayerCreationMode {
     155              :     /// Try to create image layers based on `time_for_new_image_layer`. Used in compaction code path.
     156              :     Try,
     157              :     /// Force creating the image layers if possible. For now, no image layers will be created
     158              :     /// for metadata keys. Used in compaction code path with force flag enabled.
     159              :     Force,
     160              :     /// Initial ingestion of the data, and no data should be dropped in this function. This
     161              :     /// means that no metadata keys should be included in the partitions. Used in flush frozen layer
     162              :     /// code path.
     163              :     Initial,
     164              : }
     165              : 
     166              : impl std::fmt::Display for ImageLayerCreationMode {
     167          480 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
     168          480 :         write!(f, "{:?}", self)
     169          480 :     }
     170              : }
     171              : 
     172              : /// Wrapper for key range to provide reverse ordering by range length for BinaryHeap
     173              : #[derive(Debug, Clone, PartialEq, Eq)]
     174              : pub(crate) struct Hole {
     175              :     key_range: Range<Key>,
     176              :     coverage_size: usize,
     177              : }
     178              : 
     179              : impl Ord for Hole {
     180            0 :     fn cmp(&self, other: &Self) -> Ordering {
     181            0 :         other.coverage_size.cmp(&self.coverage_size) // inverse order
     182            0 :     }
     183              : }
     184              : 
     185              : impl PartialOrd for Hole {
     186            0 :     fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
     187            0 :         Some(self.cmp(other))
     188            0 :     }
     189              : }
     190              : 
     191              : /// Temporary function for immutable storage state refactor, ensures we are dropping mutex guard instead of other things.
     192              : /// Can be removed after all refactors are done.
     193           28 : fn drop_rlock<T>(rlock: tokio::sync::OwnedRwLockReadGuard<T>) {
     194           28 :     drop(rlock)
     195           28 : }
     196              : 
     197              : /// Temporary function for immutable storage state refactor, ensures we are dropping mutex guard instead of other things.
     198              : /// Can be removed after all refactors are done.
     199          508 : fn drop_wlock<T>(rlock: tokio::sync::RwLockWriteGuard<'_, T>) {
     200          508 :     drop(rlock)
     201          508 : }
     202              : 
     203              : /// The outward-facing resources required to build a Timeline
     204              : pub struct TimelineResources {
     205              :     pub remote_client: RemoteTimelineClient,
     206              :     pub deletion_queue_client: DeletionQueueClient,
     207              :     pub timeline_get_throttle: Arc<
     208              :         crate::tenant::throttle::Throttle<&'static crate::metrics::tenant_throttling::TimelineGet>,
     209              :     >,
     210              : }
     211              : 
     212              : pub(crate) struct AuxFilesState {
     213              :     pub(crate) dir: Option<AuxFilesDirectory>,
     214              :     pub(crate) n_deltas: usize,
     215              : }
     216              : 
     217              : /// The relation size cache caches relation sizes at the end of the timeline. It speeds up WAL
     218              : /// ingestion considerably, because WAL ingestion needs to check on most records if the record
     219              : /// implicitly extends the relation.  At startup, `complete_as_of` is initialized to the current end
     220              : /// of the timeline (disk_consistent_lsn).  It's used on reads of relation sizes to check if the
     221              : /// value can be used to also update the cache, see [`Timeline::update_cached_rel_size`].
     222              : pub(crate) struct RelSizeCache {
     223              :     pub(crate) complete_as_of: Lsn,
     224              :     pub(crate) map: HashMap<RelTag, (Lsn, BlockNumber)>,
     225              : }
     226              : 
     227              : pub struct Timeline {
     228              :     conf: &'static PageServerConf,
     229              :     tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
     230              : 
     231              :     myself: Weak<Self>,
     232              : 
     233              :     pub(crate) tenant_shard_id: TenantShardId,
     234              :     pub timeline_id: TimelineId,
     235              : 
     236              :     /// The generation of the tenant that instantiated us: this is used for safety when writing remote objects.
     237              :     /// Never changes for the lifetime of this [`Timeline`] object.
     238              :     ///
     239              :     /// This duplicates the generation stored in LocationConf, but that structure is mutable:
     240              :     /// this copy enforces the invariant that generatio doesn't change during a Tenant's lifetime.
     241              :     pub(crate) generation: Generation,
     242              : 
     243              :     /// The detailed sharding information from our parent Tenant.  This enables us to map keys
     244              :     /// to shards, and is constant through the lifetime of this Timeline.
     245              :     shard_identity: ShardIdentity,
     246              : 
     247              :     pub pg_version: u32,
     248              : 
     249              :     /// The tuple has two elements.
     250              :     /// 1. `LayerFileManager` keeps track of the various physical representations of the layer files (inmem, local, remote).
     251              :     /// 2. `LayerMap`, the acceleration data structure for `get_reconstruct_data`.
     252              :     ///
     253              :     /// `LayerMap` maps out the `(PAGE,LSN) / (KEY,LSN)` space, which is composed of `(KeyRange, LsnRange)` rectangles.
     254              :     /// We describe these rectangles through the `PersistentLayerDesc` struct.
     255              :     ///
     256              :     /// When we want to reconstruct a page, we first find the `PersistentLayerDesc`'s that we need for page reconstruction,
     257              :     /// using `LayerMap`. Then, we use `LayerFileManager` to get the `PersistentLayer`'s that correspond to the
     258              :     /// `PersistentLayerDesc`'s.
     259              :     ///
     260              :     /// Hence, it's important to keep things coherent. The `LayerFileManager` must always have an entry for all
     261              :     /// `PersistentLayerDesc`'s in the `LayerMap`. If it doesn't, `LayerFileManager::get_from_desc` will panic at
     262              :     /// runtime, e.g., during page reconstruction.
     263              :     ///
     264              :     /// In the future, we'll be able to split up the tuple of LayerMap and `LayerFileManager`,
     265              :     /// so that e.g. on-demand-download/eviction, and layer spreading, can operate just on `LayerFileManager`.
     266              :     pub(crate) layers: Arc<tokio::sync::RwLock<LayerManager>>,
     267              : 
     268              :     last_freeze_at: AtomicLsn,
     269              :     // Atomic would be more appropriate here.
     270              :     last_freeze_ts: RwLock<Instant>,
     271              : 
     272              :     pub(crate) standby_horizon: AtomicLsn,
     273              : 
     274              :     // WAL redo manager. `None` only for broken tenants.
     275              :     walredo_mgr: Option<Arc<super::WalRedoManager>>,
     276              : 
     277              :     /// Remote storage client.
     278              :     /// See [`remote_timeline_client`](super::remote_timeline_client) module comment for details.
     279              :     pub remote_client: Arc<RemoteTimelineClient>,
     280              : 
     281              :     // What page versions do we hold in the repository? If we get a
     282              :     // request > last_record_lsn, we need to wait until we receive all
     283              :     // the WAL up to the request. The SeqWait provides functions for
     284              :     // that. TODO: If we get a request for an old LSN, such that the
     285              :     // versions have already been garbage collected away, we should
     286              :     // throw an error, but we don't track that currently.
     287              :     //
     288              :     // last_record_lsn.load().last points to the end of last processed WAL record.
     289              :     //
     290              :     // We also remember the starting point of the previous record in
     291              :     // 'last_record_lsn.load().prev'. It's used to set the xl_prev pointer of the
     292              :     // first WAL record when the node is started up. But here, we just
     293              :     // keep track of it.
     294              :     last_record_lsn: SeqWait<RecordLsn, Lsn>,
     295              : 
     296              :     // All WAL records have been processed and stored durably on files on
     297              :     // local disk, up to this LSN. On crash and restart, we need to re-process
     298              :     // the WAL starting from this point.
     299              :     //
     300              :     // Some later WAL records might have been processed and also flushed to disk
     301              :     // already, so don't be surprised to see some, but there's no guarantee on
     302              :     // them yet.
     303              :     disk_consistent_lsn: AtomicLsn,
     304              : 
     305              :     // Parent timeline that this timeline was branched from, and the LSN
     306              :     // of the branch point.
     307              :     ancestor_timeline: Option<Arc<Timeline>>,
     308              :     ancestor_lsn: Lsn,
     309              : 
     310              :     pub(super) metrics: TimelineMetrics,
     311              : 
     312              :     // `Timeline` doesn't write these metrics itself, but it manages the lifetime.  Code
     313              :     // in `crate::page_service` writes these metrics.
     314              :     pub(crate) query_metrics: crate::metrics::SmgrQueryTimePerTimeline,
     315              : 
     316              :     directory_metrics: [AtomicU64; DirectoryKind::KINDS_NUM],
     317              : 
     318              :     /// Ensures layers aren't frozen by checkpointer between
     319              :     /// [`Timeline::get_layer_for_write`] and layer reads.
     320              :     /// Locked automatically by [`TimelineWriter`] and checkpointer.
     321              :     /// Must always be acquired before the layer map/individual layer lock
     322              :     /// to avoid deadlock.
     323              :     write_lock: tokio::sync::Mutex<Option<TimelineWriterState>>,
     324              : 
     325              :     /// Used to avoid multiple `flush_loop` tasks running
     326              :     pub(super) flush_loop_state: Mutex<FlushLoopState>,
     327              : 
     328              :     /// layer_flush_start_tx can be used to wake up the layer-flushing task.
     329              :     /// - The u64 value is a counter, incremented every time a new flush cycle is requested.
     330              :     ///   The flush cycle counter is sent back on the layer_flush_done channel when
     331              :     ///   the flush finishes. You can use that to wait for the flush to finish.
     332              :     /// - The LSN is updated to max() of its current value and the latest disk_consistent_lsn
     333              :     ///   read by whoever sends an update
     334              :     layer_flush_start_tx: tokio::sync::watch::Sender<(u64, Lsn)>,
     335              :     /// to be notified when layer flushing has finished, subscribe to the layer_flush_done channel
     336              :     layer_flush_done_tx: tokio::sync::watch::Sender<(u64, Result<(), FlushLayerError>)>,
     337              : 
     338              :     // Needed to ensure that we can't create a branch at a point that was already garbage collected
     339              :     pub latest_gc_cutoff_lsn: Rcu<Lsn>,
     340              : 
     341              :     // List of child timelines and their branch points. This is needed to avoid
     342              :     // garbage collecting data that is still needed by the child timelines.
     343              :     pub(crate) gc_info: std::sync::RwLock<GcInfo>,
     344              : 
     345              :     // It may change across major versions so for simplicity
     346              :     // keep it after running initdb for a timeline.
     347              :     // It is needed in checks when we want to error on some operations
     348              :     // when they are requested for pre-initdb lsn.
     349              :     // It can be unified with latest_gc_cutoff_lsn under some "first_valid_lsn",
     350              :     // though let's keep them both for better error visibility.
     351              :     pub initdb_lsn: Lsn,
     352              : 
     353              :     /// When did we last calculate the partitioning? Make it pub to test cases.
     354              :     pub(super) partitioning: tokio::sync::Mutex<((KeyPartitioning, SparseKeyPartitioning), Lsn)>,
     355              : 
     356              :     /// Configuration: how often should the partitioning be recalculated.
     357              :     repartition_threshold: u64,
     358              : 
     359              :     last_image_layer_creation_check_at: AtomicLsn,
     360              : 
     361              :     /// Current logical size of the "datadir", at the last LSN.
     362              :     current_logical_size: LogicalSize,
     363              : 
     364              :     /// Information about the last processed message by the WAL receiver,
     365              :     /// or None if WAL receiver has not received anything for this timeline
     366              :     /// yet.
     367              :     pub last_received_wal: Mutex<Option<WalReceiverInfo>>,
     368              :     pub walreceiver: Mutex<Option<WalReceiver>>,
     369              : 
     370              :     /// Relation size cache
     371              :     pub(crate) rel_size_cache: RwLock<RelSizeCache>,
     372              : 
     373              :     download_all_remote_layers_task_info: RwLock<Option<DownloadRemoteLayersTaskInfo>>,
     374              : 
     375              :     state: watch::Sender<TimelineState>,
     376              : 
     377              :     /// Prevent two tasks from deleting the timeline at the same time. If held, the
     378              :     /// timeline is being deleted. If 'true', the timeline has already been deleted.
     379              :     pub delete_progress: Arc<tokio::sync::Mutex<DeleteTimelineFlow>>,
     380              : 
     381              :     eviction_task_timeline_state: tokio::sync::Mutex<EvictionTaskTimelineState>,
     382              : 
     383              :     /// Load or creation time information about the disk_consistent_lsn and when the loading
     384              :     /// happened. Used for consumption metrics.
     385              :     pub(crate) loaded_at: (Lsn, SystemTime),
     386              : 
     387              :     /// Gate to prevent shutdown completing while I/O is still happening to this timeline's data
     388              :     pub(crate) gate: Gate,
     389              : 
     390              :     /// Cancellation token scoped to this timeline: anything doing long-running work relating
     391              :     /// to the timeline should drop out when this token fires.
     392              :     pub(crate) cancel: CancellationToken,
     393              : 
     394              :     /// Make sure we only have one running compaction at a time in tests.
     395              :     ///
     396              :     /// Must only be taken in two places:
     397              :     /// - [`Timeline::compact`] (this file)
     398              :     /// - [`delete::delete_local_timeline_directory`]
     399              :     ///
     400              :     /// Timeline deletion will acquire both compaction and gc locks in whatever order.
     401              :     compaction_lock: tokio::sync::Mutex<()>,
     402              : 
     403              :     /// Make sure we only have one running gc at a time.
     404              :     ///
     405              :     /// Must only be taken in two places:
     406              :     /// - [`Timeline::gc`] (this file)
     407              :     /// - [`delete::delete_local_timeline_directory`]
     408              :     ///
     409              :     /// Timeline deletion will acquire both compaction and gc locks in whatever order.
     410              :     gc_lock: tokio::sync::Mutex<()>,
     411              : 
     412              :     /// Cloned from [`super::Tenant::timeline_get_throttle`] on construction.
     413              :     timeline_get_throttle: Arc<
     414              :         crate::tenant::throttle::Throttle<&'static crate::metrics::tenant_throttling::TimelineGet>,
     415              :     >,
     416              : 
     417              :     /// Keep aux directory cache to avoid it's reconstruction on each update
     418              :     pub(crate) aux_files: tokio::sync::Mutex<AuxFilesState>,
     419              : 
     420              :     /// Size estimator for aux file v2
     421              :     pub(crate) aux_file_size_estimator: AuxFileSizeEstimator,
     422              : 
     423              :     /// Indicate whether aux file v2 storage is enabled.
     424              :     pub(crate) last_aux_file_policy: AtomicAuxFilePolicy,
     425              : }
     426              : 
     427              : pub struct WalReceiverInfo {
     428              :     pub wal_source_connconf: PgConnectionConfig,
     429              :     pub last_received_msg_lsn: Lsn,
     430              :     pub last_received_msg_ts: u128,
     431              : }
     432              : 
     433              : /// Information about how much history needs to be retained, needed by
     434              : /// Garbage Collection.
     435              : #[derive(Default)]
     436              : pub(crate) struct GcInfo {
     437              :     /// Specific LSNs that are needed.
     438              :     ///
     439              :     /// Currently, this includes all points where child branches have
     440              :     /// been forked off from. In the future, could also include
     441              :     /// explicit user-defined snapshot points.
     442              :     pub(crate) retain_lsns: Vec<Lsn>,
     443              : 
     444              :     /// The cutoff coordinates, which are combined by selecting the minimum.
     445              :     pub(crate) cutoffs: GcCutoffs,
     446              : }
     447              : 
     448              : impl GcInfo {
     449          224 :     pub(crate) fn min_cutoff(&self) -> Lsn {
     450          224 :         self.cutoffs.select_min()
     451          224 :     }
     452              : }
     453              : 
     454              : /// The `GcInfo` component describing which Lsns need to be retained.
     455              : #[derive(Debug)]
     456              : pub(crate) struct GcCutoffs {
     457              :     /// Keep everything newer than this point.
     458              :     ///
     459              :     /// This is calculated by subtracting 'gc_horizon' setting from
     460              :     /// last-record LSN
     461              :     ///
     462              :     /// FIXME: is this inclusive or exclusive?
     463              :     pub(crate) horizon: Lsn,
     464              : 
     465              :     /// In addition to 'retain_lsns' and 'horizon_cutoff', keep everything newer than this
     466              :     /// point.
     467              :     ///
     468              :     /// This is calculated by finding a number such that a record is needed for PITR
     469              :     /// if only if its LSN is larger than 'pitr_cutoff'.
     470              :     pub(crate) pitr: Lsn,
     471              : }
     472              : 
     473              : impl Default for GcCutoffs {
     474          352 :     fn default() -> Self {
     475          352 :         Self {
     476          352 :             horizon: Lsn::INVALID,
     477          352 :             pitr: Lsn::INVALID,
     478          352 :         }
     479          352 :     }
     480              : }
     481              : 
     482              : impl GcCutoffs {
     483          224 :     fn select_min(&self) -> Lsn {
     484          224 :         std::cmp::min(self.horizon, self.pitr)
     485          224 :     }
     486              : }
     487              : 
     488              : pub(crate) struct TimelineVisitOutcome {
     489              :     completed_keyspace: KeySpace,
     490              :     image_covered_keyspace: KeySpace,
     491              : }
     492              : 
     493              : /// An error happened in a get() operation.
     494            2 : #[derive(thiserror::Error, Debug)]
     495              : pub(crate) enum PageReconstructError {
     496              :     #[error(transparent)]
     497              :     Other(#[from] anyhow::Error),
     498              : 
     499              :     #[error("Ancestor LSN wait error: {0}")]
     500              :     AncestorLsnTimeout(#[from] WaitLsnError),
     501              : 
     502              :     #[error("timeline shutting down")]
     503              :     Cancelled,
     504              : 
     505              :     /// The ancestor of this is being stopped
     506              :     #[error("ancestor timeline {0} is being stopped")]
     507              :     AncestorStopping(TimelineId),
     508              : 
     509              :     /// An error happened replaying WAL records
     510              :     #[error(transparent)]
     511              :     WalRedo(anyhow::Error),
     512              : 
     513              :     #[error("{0}")]
     514              :     MissingKey(MissingKeyError),
     515              : }
     516              : 
     517              : impl GetVectoredError {
     518              :     #[cfg(test)]
     519            6 :     pub(crate) fn is_missing_key_error(&self) -> bool {
     520            6 :         matches!(self, Self::MissingKey(_))
     521            6 :     }
     522              : }
     523              : 
     524              : #[derive(Debug)]
     525              : pub struct MissingKeyError {
     526              :     key: Key,
     527              :     shard: ShardNumber,
     528              :     cont_lsn: Lsn,
     529              :     request_lsn: Lsn,
     530              :     ancestor_lsn: Option<Lsn>,
     531              :     traversal_path: Vec<TraversalPathItem>,
     532              :     backtrace: Option<std::backtrace::Backtrace>,
     533              : }
     534              : 
     535              : impl std::fmt::Display for MissingKeyError {
     536            0 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
     537            0 :         write!(
     538            0 :             f,
     539            0 :             "could not find data for key {} (shard {:?}) at LSN {}, request LSN {}",
     540            0 :             self.key, self.shard, self.cont_lsn, self.request_lsn
     541            0 :         )?;
     542            0 :         if let Some(ref ancestor_lsn) = self.ancestor_lsn {
     543            0 :             write!(f, ", ancestor {}", ancestor_lsn)?;
     544            0 :         }
     545              : 
     546            0 :         if !self.traversal_path.is_empty() {
     547            0 :             writeln!(f)?;
     548            0 :         }
     549              : 
     550            0 :         for (r, c, l) in &self.traversal_path {
     551            0 :             writeln!(
     552            0 :                 f,
     553            0 :                 "layer traversal: result {:?}, cont_lsn {}, layer: {}",
     554            0 :                 r, c, l,
     555            0 :             )?;
     556              :         }
     557              : 
     558            0 :         if let Some(ref backtrace) = self.backtrace {
     559            0 :             write!(f, "\n{}", backtrace)?;
     560            0 :         }
     561              : 
     562            0 :         Ok(())
     563            0 :     }
     564              : }
     565              : 
     566              : impl PageReconstructError {
     567              :     /// Returns true if this error indicates a tenant/timeline shutdown alike situation
     568            0 :     pub(crate) fn is_stopping(&self) -> bool {
     569            0 :         use PageReconstructError::*;
     570            0 :         match self {
     571            0 :             Other(_) => false,
     572            0 :             AncestorLsnTimeout(_) => false,
     573            0 :             Cancelled | AncestorStopping(_) => true,
     574            0 :             WalRedo(_) => false,
     575            0 :             MissingKey { .. } => false,
     576              :         }
     577            0 :     }
     578              : }
     579              : 
     580            0 : #[derive(thiserror::Error, Debug)]
     581              : enum CreateImageLayersError {
     582              :     #[error("timeline shutting down")]
     583              :     Cancelled,
     584              : 
     585              :     #[error(transparent)]
     586              :     GetVectoredError(GetVectoredError),
     587              : 
     588              :     #[error(transparent)]
     589              :     PageReconstructError(PageReconstructError),
     590              : 
     591              :     #[error(transparent)]
     592              :     Other(#[from] anyhow::Error),
     593              : }
     594              : 
     595            0 : #[derive(thiserror::Error, Debug)]
     596              : enum FlushLayerError {
     597              :     /// Timeline cancellation token was cancelled
     598              :     #[error("timeline shutting down")]
     599              :     Cancelled,
     600              : 
     601              :     #[error(transparent)]
     602              :     CreateImageLayersError(CreateImageLayersError),
     603              : 
     604              :     #[error(transparent)]
     605              :     Other(#[from] anyhow::Error),
     606              : }
     607              : 
     608            0 : #[derive(thiserror::Error, Debug)]
     609              : pub(crate) enum GetVectoredError {
     610              :     #[error("timeline shutting down")]
     611              :     Cancelled,
     612              : 
     613              :     #[error("Requested too many keys: {0} > {}", Timeline::MAX_GET_VECTORED_KEYS)]
     614              :     Oversized(u64),
     615              : 
     616              :     #[error("Requested at invalid LSN: {0}")]
     617              :     InvalidLsn(Lsn),
     618              : 
     619              :     #[error("Requested key not found: {0}")]
     620              :     MissingKey(MissingKeyError),
     621              : 
     622              :     #[error(transparent)]
     623              :     GetReadyAncestorError(GetReadyAncestorError),
     624              : 
     625              :     #[error(transparent)]
     626              :     Other(#[from] anyhow::Error),
     627              : }
     628              : 
     629            0 : #[derive(thiserror::Error, Debug)]
     630              : pub(crate) enum GetReadyAncestorError {
     631              :     #[error("ancestor timeline {0} is being stopped")]
     632              :     AncestorStopping(TimelineId),
     633              : 
     634              :     #[error("Ancestor LSN wait error: {0}")]
     635              :     AncestorLsnTimeout(#[from] WaitLsnError),
     636              : 
     637              :     #[error("Cancelled")]
     638              :     Cancelled,
     639              : 
     640              :     #[error(transparent)]
     641              :     Other(#[from] anyhow::Error),
     642              : }
     643              : 
     644              : #[derive(Clone, Copy)]
     645              : pub enum LogicalSizeCalculationCause {
     646              :     Initial,
     647              :     ConsumptionMetricsSyntheticSize,
     648              :     EvictionTaskImitation,
     649              :     TenantSizeHandler,
     650              : }
     651              : 
     652              : pub enum GetLogicalSizePriority {
     653              :     User,
     654              :     Background,
     655              : }
     656              : 
     657            0 : #[derive(enumset::EnumSetType)]
     658              : pub(crate) enum CompactFlags {
     659              :     ForceRepartition,
     660              :     ForceImageLayerCreation,
     661              : }
     662              : 
     663              : impl std::fmt::Debug for Timeline {
     664            0 :     fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
     665            0 :         write!(f, "Timeline<{}>", self.timeline_id)
     666            0 :     }
     667              : }
     668              : 
     669            0 : #[derive(thiserror::Error, Debug)]
     670              : pub(crate) enum WaitLsnError {
     671              :     // Called on a timeline which is shutting down
     672              :     #[error("Shutdown")]
     673              :     Shutdown,
     674              : 
     675              :     // Called on an timeline not in active state or shutting down
     676              :     #[error("Bad state (not active)")]
     677              :     BadState,
     678              : 
     679              :     // Timeout expired while waiting for LSN to catch up with goal.
     680              :     #[error("{0}")]
     681              :     Timeout(String),
     682              : }
     683              : 
     684              : // The impls below achieve cancellation mapping for errors.
     685              : // Perhaps there's a way of achieving this with less cruft.
     686              : 
     687              : impl From<CreateImageLayersError> for CompactionError {
     688            0 :     fn from(e: CreateImageLayersError) -> Self {
     689            0 :         match e {
     690            0 :             CreateImageLayersError::Cancelled => CompactionError::ShuttingDown,
     691            0 :             _ => CompactionError::Other(e.into()),
     692              :         }
     693            0 :     }
     694              : }
     695              : 
     696              : impl From<CreateImageLayersError> for FlushLayerError {
     697            0 :     fn from(e: CreateImageLayersError) -> Self {
     698            0 :         match e {
     699            0 :             CreateImageLayersError::Cancelled => FlushLayerError::Cancelled,
     700            0 :             any => FlushLayerError::CreateImageLayersError(any),
     701              :         }
     702            0 :     }
     703              : }
     704              : 
     705              : impl From<PageReconstructError> for CreateImageLayersError {
     706            0 :     fn from(e: PageReconstructError) -> Self {
     707            0 :         match e {
     708            0 :             PageReconstructError::Cancelled => CreateImageLayersError::Cancelled,
     709            0 :             _ => CreateImageLayersError::PageReconstructError(e),
     710              :         }
     711            0 :     }
     712              : }
     713              : 
     714              : impl From<GetVectoredError> for CreateImageLayersError {
     715            0 :     fn from(e: GetVectoredError) -> Self {
     716            0 :         match e {
     717            0 :             GetVectoredError::Cancelled => CreateImageLayersError::Cancelled,
     718            0 :             _ => CreateImageLayersError::GetVectoredError(e),
     719              :         }
     720            0 :     }
     721              : }
     722              : 
     723              : impl From<GetVectoredError> for PageReconstructError {
     724            0 :     fn from(e: GetVectoredError) -> Self {
     725            0 :         match e {
     726            0 :             GetVectoredError::Cancelled => PageReconstructError::Cancelled,
     727            0 :             GetVectoredError::InvalidLsn(_) => PageReconstructError::Other(anyhow!("Invalid LSN")),
     728            0 :             err @ GetVectoredError::Oversized(_) => PageReconstructError::Other(err.into()),
     729            0 :             GetVectoredError::MissingKey(err) => PageReconstructError::MissingKey(err),
     730            0 :             GetVectoredError::GetReadyAncestorError(err) => PageReconstructError::from(err),
     731            0 :             GetVectoredError::Other(err) => PageReconstructError::Other(err),
     732              :         }
     733            0 :     }
     734              : }
     735              : 
     736              : impl From<GetReadyAncestorError> for PageReconstructError {
     737            2 :     fn from(e: GetReadyAncestorError) -> Self {
     738            2 :         use GetReadyAncestorError::*;
     739            2 :         match e {
     740            0 :             AncestorStopping(tid) => PageReconstructError::AncestorStopping(tid),
     741            0 :             AncestorLsnTimeout(wait_err) => PageReconstructError::AncestorLsnTimeout(wait_err),
     742            0 :             Cancelled => PageReconstructError::Cancelled,
     743            2 :             Other(other) => PageReconstructError::Other(other),
     744              :         }
     745            2 :     }
     746              : }
     747              : 
     748              : #[derive(
     749              :     Eq,
     750              :     PartialEq,
     751              :     Debug,
     752              :     Copy,
     753              :     Clone,
     754          160 :     strum_macros::EnumString,
     755            0 :     strum_macros::Display,
     756            0 :     serde_with::DeserializeFromStr,
     757              :     serde_with::SerializeDisplay,
     758              : )]
     759              : #[strum(serialize_all = "kebab-case")]
     760              : pub enum GetVectoredImpl {
     761              :     Sequential,
     762              :     Vectored,
     763              : }
     764              : 
     765              : #[derive(
     766              :     Eq,
     767              :     PartialEq,
     768              :     Debug,
     769              :     Copy,
     770              :     Clone,
     771          160 :     strum_macros::EnumString,
     772            0 :     strum_macros::Display,
     773            0 :     serde_with::DeserializeFromStr,
     774              :     serde_with::SerializeDisplay,
     775              : )]
     776              : #[strum(serialize_all = "kebab-case")]
     777              : pub enum GetImpl {
     778              :     Legacy,
     779              :     Vectored,
     780              : }
     781              : 
     782              : pub(crate) enum WaitLsnWaiter<'a> {
     783              :     Timeline(&'a Timeline),
     784              :     Tenant,
     785              :     PageService,
     786              : }
     787              : 
     788              : /// Argument to [`Timeline::shutdown`].
     789              : #[derive(Debug, Clone, Copy)]
     790              : pub(crate) enum ShutdownMode {
     791              :     /// Graceful shutdown, may do a lot of I/O as we flush any open layers to disk and then
     792              :     /// also to remote storage.  This method can easily take multiple seconds for a busy timeline.
     793              :     ///
     794              :     /// While we are flushing, we continue to accept read I/O for LSNs ingested before
     795              :     /// the call to [`Timeline::shutdown`].
     796              :     FreezeAndFlush,
     797              :     /// Shut down immediately, without waiting for any open layers to flush.
     798              :     Hard,
     799              : }
     800              : 
     801              : struct ImageLayerCreationOutcome {
     802              :     image: Option<ResidentLayer>,
     803              :     next_start_key: Key,
     804              : }
     805              : 
     806              : /// Public interface functions
     807              : impl Timeline {
     808              :     /// Get the LSN where this branch was created
     809            8 :     pub(crate) fn get_ancestor_lsn(&self) -> Lsn {
     810            8 :         self.ancestor_lsn
     811            8 :     }
     812              : 
     813              :     /// Get the ancestor's timeline id
     814         3314 :     pub(crate) fn get_ancestor_timeline_id(&self) -> Option<TimelineId> {
     815         3314 :         self.ancestor_timeline
     816         3314 :             .as_ref()
     817         3314 :             .map(|ancestor| ancestor.timeline_id)
     818         3314 :     }
     819              : 
     820              :     /// Lock and get timeline's GC cutoff
     821          983 :     pub(crate) fn get_latest_gc_cutoff_lsn(&self) -> RcuReadGuard<Lsn> {
     822          983 :         self.latest_gc_cutoff_lsn.read()
     823          983 :     }
     824              : 
     825              :     /// Look up given page version.
     826              :     ///
     827              :     /// If a remote layer file is needed, it is downloaded as part of this
     828              :     /// call.
     829              :     ///
     830              :     /// This method enforces [`Self::timeline_get_throttle`] internally.
     831              :     ///
     832              :     /// NOTE: It is considered an error to 'get' a key that doesn't exist. The
     833              :     /// abstraction above this needs to store suitable metadata to track what
     834              :     /// data exists with what keys, in separate metadata entries. If a
     835              :     /// non-existent key is requested, we may incorrectly return a value from
     836              :     /// an ancestor branch, for example, or waste a lot of cycles chasing the
     837              :     /// non-existing key.
     838              :     ///
     839              :     /// # Cancel-Safety
     840              :     ///
     841              :     /// This method is cancellation-safe.
     842              :     #[inline(always)]
     843       624021 :     pub(crate) async fn get(
     844       624021 :         &self,
     845       624021 :         key: Key,
     846       624021 :         lsn: Lsn,
     847       624021 :         ctx: &RequestContext,
     848       624021 :     ) -> Result<Bytes, PageReconstructError> {
     849       624021 :         if !lsn.is_valid() {
     850            0 :             return Err(PageReconstructError::Other(anyhow::anyhow!("Invalid LSN")));
     851       624021 :         }
     852       624021 : 
     853       624021 :         // This check is debug-only because of the cost of hashing, and because it's a double-check: we
     854       624021 :         // already checked the key against the shard_identity when looking up the Timeline from
     855       624021 :         // page_service.
     856       624021 :         debug_assert!(!self.shard_identity.is_key_disposable(&key));
     857              : 
     858       624021 :         self.timeline_get_throttle.throttle(ctx, 1).await;
     859              : 
     860              :         // Check the page cache. We will get back the most recent page with lsn <= `lsn`.
     861              :         // The cached image can be returned directly if there is no WAL between the cached image
     862              :         // and requested LSN. The cached image can also be used to reduce the amount of WAL needed
     863              :         // for redo.
     864       624021 :         let cached_page_img = match self.lookup_cached_page(&key, lsn, ctx).await {
     865            0 :             Some((cached_lsn, cached_img)) => {
     866            0 :                 match cached_lsn.cmp(&lsn) {
     867            0 :                     Ordering::Less => {} // there might be WAL between cached_lsn and lsn, we need to check
     868              :                     Ordering::Equal => {
     869            0 :                         MATERIALIZED_PAGE_CACHE_HIT_DIRECT.inc();
     870            0 :                         return Ok(cached_img); // exact LSN match, return the image
     871              :                     }
     872              :                     Ordering::Greater => {
     873            0 :                         unreachable!("the returned lsn should never be after the requested lsn")
     874              :                     }
     875              :                 }
     876            0 :                 Some((cached_lsn, cached_img))
     877              :             }
     878       624021 :             None => None,
     879              :         };
     880              : 
     881       624021 :         match self.conf.get_impl {
     882              :             GetImpl::Legacy => {
     883       624021 :                 let reconstruct_state = ValueReconstructState {
     884       624021 :                     records: Vec::new(),
     885       624021 :                     img: cached_page_img,
     886       624021 :                 };
     887       624021 : 
     888       624021 :                 self.get_impl(key, lsn, reconstruct_state, ctx).await
     889              :             }
     890              :             GetImpl::Vectored => {
     891            0 :                 let keyspace = KeySpace {
     892            0 :                     ranges: vec![key..key.next()],
     893            0 :                 };
     894            0 : 
     895            0 :                 // Initialise the reconstruct state for the key with the cache
     896            0 :                 // entry returned above.
     897            0 :                 let mut reconstruct_state = ValuesReconstructState::new();
     898            0 : 
     899            0 :                 // Only add the cached image to the reconstruct state when it exists.
     900            0 :                 if cached_page_img.is_some() {
     901            0 :                     let mut key_state = VectoredValueReconstructState::default();
     902            0 :                     key_state.img = cached_page_img;
     903            0 :                     reconstruct_state.keys.insert(key, Ok(key_state));
     904            0 :                 }
     905              : 
     906            0 :                 let vectored_res = self
     907            0 :                     .get_vectored_impl(keyspace.clone(), lsn, &mut reconstruct_state, ctx)
     908            0 :                     .await;
     909              : 
     910            0 :                 if self.conf.validate_vectored_get {
     911            0 :                     self.validate_get_vectored_impl(&vectored_res, keyspace, lsn, ctx)
     912            0 :                         .await;
     913            0 :                 }
     914              : 
     915            0 :                 let key_value = vectored_res?.pop_first();
     916            0 :                 match key_value {
     917            0 :                     Some((got_key, value)) => {
     918            0 :                         if got_key != key {
     919            0 :                             error!(
     920            0 :                                 "Expected {}, but singular vectored get returned {}",
     921              :                                 key, got_key
     922              :                             );
     923            0 :                             Err(PageReconstructError::Other(anyhow!(
     924            0 :                                 "Singular vectored get returned wrong key"
     925            0 :                             )))
     926              :                         } else {
     927            0 :                             value
     928              :                         }
     929              :                     }
     930            0 :                     None => Err(PageReconstructError::MissingKey(MissingKeyError {
     931            0 :                         key,
     932            0 :                         shard: self.shard_identity.get_shard_number(&key),
     933            0 :                         cont_lsn: Lsn(0),
     934            0 :                         request_lsn: lsn,
     935            0 :                         ancestor_lsn: None,
     936            0 :                         traversal_path: Vec::new(),
     937            0 :                         backtrace: None,
     938            0 :                     })),
     939              :                 }
     940              :             }
     941              :         }
     942       624021 :     }
     943              : 
     944              :     /// Not subject to [`Self::timeline_get_throttle`].
     945       625369 :     async fn get_impl(
     946       625369 :         &self,
     947       625369 :         key: Key,
     948       625369 :         lsn: Lsn,
     949       625369 :         mut reconstruct_state: ValueReconstructState,
     950       625369 :         ctx: &RequestContext,
     951       625369 :     ) -> Result<Bytes, PageReconstructError> {
     952       625369 :         // XXX: structured stats collection for layer eviction here.
     953       625369 :         trace!(
     954            0 :             "get page request for {}@{} from task kind {:?}",
     955            0 :             key,
     956            0 :             lsn,
     957            0 :             ctx.task_kind()
     958              :         );
     959              : 
     960       625369 :         let timer = crate::metrics::GET_RECONSTRUCT_DATA_TIME
     961       625369 :             .for_get_kind(GetKind::Singular)
     962       625369 :             .start_timer();
     963       625369 :         let path = self
     964       625369 :             .get_reconstruct_data(key, lsn, &mut reconstruct_state, ctx)
     965        43615 :             .await?;
     966       625245 :         timer.stop_and_record();
     967       625245 : 
     968       625245 :         let start = Instant::now();
     969       625245 :         let res = self.reconstruct_value(key, lsn, reconstruct_state).await;
     970       625245 :         let elapsed = start.elapsed();
     971       625245 :         crate::metrics::RECONSTRUCT_TIME
     972       625245 :             .for_get_kind(GetKind::Singular)
     973       625245 :             .observe(elapsed.as_secs_f64());
     974       625245 : 
     975       625245 :         if cfg!(feature = "testing") && res.is_err() {
     976              :             // it can only be walredo issue
     977              :             use std::fmt::Write;
     978              : 
     979            0 :             let mut msg = String::new();
     980            0 : 
     981            0 :             path.into_iter().for_each(|(res, cont_lsn, layer)| {
     982            0 :                 writeln!(
     983            0 :                     msg,
     984            0 :                     "- layer traversal: result {res:?}, cont_lsn {cont_lsn}, layer: {}",
     985            0 :                     layer,
     986            0 :                 )
     987            0 :                 .expect("string grows")
     988            0 :             });
     989            0 : 
     990            0 :             // this is to rule out or provide evidence that we could in some cases read a duplicate
     991            0 :             // walrecord
     992            0 :             tracing::info!("walredo failed, path:\n{msg}");
     993       625245 :         }
     994              : 
     995       625245 :         res
     996       625369 :     }
     997              : 
     998              :     pub(crate) const MAX_GET_VECTORED_KEYS: u64 = 32;
     999              : 
    1000              :     /// Look up multiple page versions at a given LSN
    1001              :     ///
    1002              :     /// This naive implementation will be replaced with a more efficient one
    1003              :     /// which actually vectorizes the read path.
    1004          774 :     pub(crate) async fn get_vectored(
    1005          774 :         &self,
    1006          774 :         keyspace: KeySpace,
    1007          774 :         lsn: Lsn,
    1008          774 :         ctx: &RequestContext,
    1009          774 :     ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
    1010          774 :         if !lsn.is_valid() {
    1011            0 :             return Err(GetVectoredError::InvalidLsn(lsn));
    1012          774 :         }
    1013          774 : 
    1014          774 :         let key_count = keyspace.total_raw_size().try_into().unwrap();
    1015          774 :         if key_count > Timeline::MAX_GET_VECTORED_KEYS {
    1016            0 :             return Err(GetVectoredError::Oversized(key_count));
    1017          774 :         }
    1018              : 
    1019         1548 :         for range in &keyspace.ranges {
    1020          774 :             let mut key = range.start;
    1021         1800 :             while key != range.end {
    1022         1026 :                 assert!(!self.shard_identity.is_key_disposable(&key));
    1023         1026 :                 key = key.next();
    1024              :             }
    1025              :         }
    1026              : 
    1027          774 :         trace!(
    1028            0 :             "get vectored request for {:?}@{} from task kind {:?} will use {} implementation",
    1029            0 :             keyspace,
    1030            0 :             lsn,
    1031            0 :             ctx.task_kind(),
    1032              :             self.conf.get_vectored_impl
    1033              :         );
    1034              : 
    1035          774 :         let start = crate::metrics::GET_VECTORED_LATENCY
    1036          774 :             .for_task_kind(ctx.task_kind())
    1037          774 :             .map(|metric| (metric, Instant::now()));
    1038              : 
    1039              :         // start counting after throttle so that throttle time
    1040              :         // is always less than observation time
    1041          774 :         let throttled = self
    1042          774 :             .timeline_get_throttle
    1043          774 :             .throttle(ctx, key_count as usize)
    1044            0 :             .await;
    1045              : 
    1046          774 :         let res = match self.conf.get_vectored_impl {
    1047              :             GetVectoredImpl::Sequential => {
    1048          774 :                 self.get_vectored_sequential_impl(keyspace, lsn, ctx).await
    1049              :             }
    1050              :             GetVectoredImpl::Vectored => {
    1051            0 :                 let vectored_res = self
    1052            0 :                     .get_vectored_impl(
    1053            0 :                         keyspace.clone(),
    1054            0 :                         lsn,
    1055            0 :                         &mut ValuesReconstructState::new(),
    1056            0 :                         ctx,
    1057            0 :                     )
    1058            0 :                     .await;
    1059              : 
    1060            0 :                 if self.conf.validate_vectored_get {
    1061            0 :                     self.validate_get_vectored_impl(&vectored_res, keyspace, lsn, ctx)
    1062            0 :                         .await;
    1063            0 :                 }
    1064              : 
    1065            0 :                 vectored_res
    1066              :             }
    1067              :         };
    1068              : 
    1069          774 :         if let Some((metric, start)) = start {
    1070            0 :             let elapsed = start.elapsed();
    1071            0 :             let ex_throttled = if let Some(throttled) = throttled {
    1072            0 :                 elapsed.checked_sub(throttled)
    1073              :             } else {
    1074            0 :                 Some(elapsed)
    1075              :             };
    1076              : 
    1077            0 :             if let Some(ex_throttled) = ex_throttled {
    1078            0 :                 metric.observe(ex_throttled.as_secs_f64());
    1079            0 :             } else {
    1080            0 :                 use utils::rate_limit::RateLimit;
    1081            0 :                 static LOGGED: Lazy<Mutex<RateLimit>> =
    1082            0 :                     Lazy::new(|| Mutex::new(RateLimit::new(Duration::from_secs(10))));
    1083            0 :                 let mut rate_limit = LOGGED.lock().unwrap();
    1084            0 :                 rate_limit.call(|| {
    1085            0 :                     warn!("error deducting time spent throttled; this message is logged at a global rate limit");
    1086            0 :                 });
    1087            0 :             }
    1088          774 :         }
    1089              : 
    1090          774 :         res
    1091          774 :     }
    1092              : 
    1093              :     /// Scan the keyspace and return all existing key-values in the keyspace. This currently uses vectored
    1094              :     /// get underlying. Normal vectored get would throw an error when a key in the keyspace is not found
    1095              :     /// during the search, but for the scan interface, it returns all existing key-value pairs, and does
    1096              :     /// not expect each single key in the key space will be found. The semantics is closer to the RocksDB
    1097              :     /// scan iterator interface. We could optimize this interface later to avoid some checks in the vectored
    1098              :     /// get path to maintain and split the probing and to-be-probe keyspace. We also need to ensure that
    1099              :     /// the scan operation will not cause OOM in the future.
    1100              :     #[allow(dead_code)]
    1101            8 :     pub(crate) async fn scan(
    1102            8 :         &self,
    1103            8 :         keyspace: KeySpace,
    1104            8 :         lsn: Lsn,
    1105            8 :         ctx: &RequestContext,
    1106            8 :     ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
    1107            8 :         if !lsn.is_valid() {
    1108            0 :             return Err(GetVectoredError::InvalidLsn(lsn));
    1109            8 :         }
    1110            8 : 
    1111            8 :         trace!(
    1112            0 :             "key-value scan request for {:?}@{} from task kind {:?}",
    1113            0 :             keyspace,
    1114            0 :             lsn,
    1115            0 :             ctx.task_kind()
    1116              :         );
    1117              : 
    1118              :         // We should generalize this into Keyspace::contains in the future.
    1119           16 :         for range in &keyspace.ranges {
    1120            8 :             if range.start.field1 < METADATA_KEY_BEGIN_PREFIX
    1121            8 :                 || range.end.field1 > METADATA_KEY_END_PREFIX
    1122              :             {
    1123            0 :                 return Err(GetVectoredError::Other(anyhow::anyhow!(
    1124            0 :                     "only metadata keyspace can be scanned"
    1125            0 :                 )));
    1126            8 :             }
    1127              :         }
    1128              : 
    1129            8 :         let start = crate::metrics::SCAN_LATENCY
    1130            8 :             .for_task_kind(ctx.task_kind())
    1131            8 :             .map(ScanLatencyOngoingRecording::start_recording);
    1132              : 
    1133              :         // start counting after throttle so that throttle time
    1134              :         // is always less than observation time
    1135            8 :         let throttled = self
    1136            8 :             .timeline_get_throttle
    1137            8 :             // assume scan = 1 quota for now until we find a better way to process this
    1138            8 :             .throttle(ctx, 1)
    1139            0 :             .await;
    1140              : 
    1141            8 :         let vectored_res = self
    1142            8 :             .get_vectored_impl(
    1143            8 :                 keyspace.clone(),
    1144            8 :                 lsn,
    1145            8 :                 &mut ValuesReconstructState::default(),
    1146            8 :                 ctx,
    1147            8 :             )
    1148            0 :             .await;
    1149              : 
    1150            8 :         if let Some(recording) = start {
    1151            0 :             recording.observe(throttled);
    1152            8 :         }
    1153              : 
    1154            8 :         vectored_res
    1155            8 :     }
    1156              : 
    1157              :     /// Not subject to [`Self::timeline_get_throttle`].
    1158          786 :     pub(super) async fn get_vectored_sequential_impl(
    1159          786 :         &self,
    1160          786 :         keyspace: KeySpace,
    1161          786 :         lsn: Lsn,
    1162          786 :         ctx: &RequestContext,
    1163          786 :     ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
    1164          786 :         let mut values = BTreeMap::new();
    1165              : 
    1166         1572 :         for range in keyspace.ranges {
    1167          786 :             let mut key = range.start;
    1168         2134 :             while key != range.end {
    1169         1348 :                 let block = self
    1170         1348 :                     .get_impl(key, lsn, ValueReconstructState::default(), ctx)
    1171           37 :                     .await;
    1172              : 
    1173              :                 use PageReconstructError::*;
    1174            0 :                 match block {
    1175              :                     Err(Cancelled | AncestorStopping(_)) => {
    1176            0 :                         return Err(GetVectoredError::Cancelled)
    1177              :                     }
    1178              :                     Err(MissingKey(_))
    1179            2 :                         if NON_INHERITED_RANGE.contains(&key)
    1180            2 :                             || NON_INHERITED_SPARSE_RANGE.contains(&key) =>
    1181            2 :                     {
    1182            2 :                         // Ignore missing key error for aux key range. TODO: currently, we assume non_inherited_range == aux_key_range.
    1183            2 :                         // When we add more types of keys into the page server, we should revisit this part of code and throw errors
    1184            2 :                         // accordingly.
    1185            2 :                         key = key.next();
    1186            2 :                     }
    1187            0 :                     Err(MissingKey(err)) => {
    1188            0 :                         return Err(GetVectoredError::MissingKey(err));
    1189              :                     }
    1190            0 :                     Err(Other(err))
    1191            0 :                         if err
    1192            0 :                             .to_string()
    1193            0 :                             .contains("downloading evicted layer file failed") =>
    1194            0 :                     {
    1195            0 :                         return Err(GetVectoredError::Other(err))
    1196              :                     }
    1197            0 :                     Err(Other(err))
    1198            0 :                         if err
    1199            0 :                             .chain()
    1200            0 :                             .any(|cause| cause.to_string().contains("layer loading failed")) =>
    1201            0 :                     {
    1202            0 :                         // The intent here is to achieve error parity with the vectored read path.
    1203            0 :                         // When vectored read fails to load a layer it fails the whole read, hence
    1204            0 :                         // we mimic this behaviour here to keep the validation happy.
    1205            0 :                         return Err(GetVectoredError::Other(err));
    1206              :                     }
    1207         1346 :                     _ => {
    1208         1346 :                         values.insert(key, block);
    1209         1346 :                         key = key.next();
    1210         1346 :                     }
    1211              :                 }
    1212              :             }
    1213              :         }
    1214              : 
    1215          786 :         Ok(values)
    1216          786 :     }
    1217              : 
    1218          470 :     pub(super) async fn get_vectored_impl(
    1219          470 :         &self,
    1220          470 :         keyspace: KeySpace,
    1221          470 :         lsn: Lsn,
    1222          470 :         reconstruct_state: &mut ValuesReconstructState,
    1223          470 :         ctx: &RequestContext,
    1224          470 :     ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
    1225          470 :         let get_kind = if keyspace.total_raw_size() == 1 {
    1226           56 :             GetKind::Singular
    1227              :         } else {
    1228          414 :             GetKind::Vectored
    1229              :         };
    1230              : 
    1231          470 :         let get_data_timer = crate::metrics::GET_RECONSTRUCT_DATA_TIME
    1232          470 :             .for_get_kind(get_kind)
    1233          470 :             .start_timer();
    1234          470 :         self.get_vectored_reconstruct_data(keyspace, lsn, reconstruct_state, ctx)
    1235        11519 :             .await?;
    1236          460 :         get_data_timer.stop_and_record();
    1237          460 : 
    1238          460 :         let reconstruct_timer = crate::metrics::RECONSTRUCT_TIME
    1239          460 :             .for_get_kind(get_kind)
    1240          460 :             .start_timer();
    1241          460 :         let mut results: BTreeMap<Key, Result<Bytes, PageReconstructError>> = BTreeMap::new();
    1242          460 :         let layers_visited = reconstruct_state.get_layers_visited();
    1243              : 
    1244        78418 :         for (key, res) in std::mem::take(&mut reconstruct_state.keys) {
    1245        78418 :             match res {
    1246            0 :                 Err(err) => {
    1247            0 :                     results.insert(key, Err(err));
    1248            0 :                 }
    1249        78418 :                 Ok(state) => {
    1250        78418 :                     let state = ValueReconstructState::from(state);
    1251              : 
    1252        78418 :                     let reconstruct_res = self.reconstruct_value(key, lsn, state).await;
    1253        78418 :                     results.insert(key, reconstruct_res);
    1254              :                 }
    1255              :             }
    1256              :         }
    1257          460 :         reconstruct_timer.stop_and_record();
    1258          460 : 
    1259          460 :         // For aux file keys (v1 or v2) the vectored read path does not return an error
    1260          460 :         // when they're missing. Instead they are omitted from the resulting btree
    1261          460 :         // (this is a requirement, not a bug). Skip updating the metric in these cases
    1262          460 :         // to avoid infinite results.
    1263          460 :         if !results.is_empty() {
    1264          138 :             // Note that this is an approximation. Tracking the exact number of layers visited
    1265          138 :             // per key requires virtually unbounded memory usage and is inefficient
    1266          138 :             // (i.e. segment tree tracking each range queried from a layer)
    1267          138 :             crate::metrics::VEC_READ_NUM_LAYERS_VISITED
    1268          138 :                 .observe(layers_visited as f64 / results.len() as f64);
    1269          322 :         }
    1270              : 
    1271          460 :         Ok(results)
    1272          470 :     }
    1273              : 
    1274              :     /// Not subject to [`Self::timeline_get_throttle`].
    1275           12 :     pub(super) async fn validate_get_vectored_impl(
    1276           12 :         &self,
    1277           12 :         vectored_res: &Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError>,
    1278           12 :         keyspace: KeySpace,
    1279           12 :         lsn: Lsn,
    1280           12 :         ctx: &RequestContext,
    1281           12 :     ) {
    1282           12 :         if keyspace.overlaps(&Key::metadata_key_range()) {
    1283              :             // skip validation for metadata key range
    1284            0 :             return;
    1285           12 :         }
    1286              : 
    1287           12 :         let sequential_res = self
    1288           12 :             .get_vectored_sequential_impl(keyspace.clone(), lsn, ctx)
    1289           19 :             .await;
    1290              : 
    1291            0 :         fn errors_match(lhs: &GetVectoredError, rhs: &GetVectoredError) -> bool {
    1292            0 :             use GetVectoredError::*;
    1293            0 :             match (lhs, rhs) {
    1294            0 :                 (Oversized(l), Oversized(r)) => l == r,
    1295            0 :                 (InvalidLsn(l), InvalidLsn(r)) => l == r,
    1296            0 :                 (MissingKey(l), MissingKey(r)) => l.key == r.key,
    1297            0 :                 (GetReadyAncestorError(_), GetReadyAncestorError(_)) => true,
    1298            0 :                 (Other(_), Other(_)) => true,
    1299            0 :                 _ => false,
    1300              :             }
    1301            0 :         }
    1302              : 
    1303           12 :         match (&sequential_res, vectored_res) {
    1304            0 :             (Err(GetVectoredError::Cancelled), _) => {},
    1305            0 :             (_, Err(GetVectoredError::Cancelled)) => {},
    1306            0 :             (Err(seq_err), Ok(_)) => {
    1307            0 :                 panic!(concat!("Sequential get failed with {}, but vectored get did not",
    1308            0 :                                " - keyspace={:?} lsn={}"),
    1309            0 :                        seq_err, keyspace, lsn) },
    1310            0 :             (Ok(_), Err(GetVectoredError::GetReadyAncestorError(GetReadyAncestorError::AncestorLsnTimeout(_)))) => {
    1311            0 :                 // Sequential get runs after vectored get, so it is possible for the later
    1312            0 :                 // to time out while waiting for its ancestor's Lsn to become ready and for the
    1313            0 :                 // former to succeed (it essentially has a doubled wait time).
    1314            0 :             },
    1315            0 :             (Ok(_), Err(vec_err)) => {
    1316            0 :                 panic!(concat!("Vectored get failed with {}, but sequential get did not",
    1317            0 :                                " - keyspace={:?} lsn={}"),
    1318            0 :                        vec_err, keyspace, lsn) },
    1319            0 :             (Err(seq_err), Err(vec_err)) => {
    1320            0 :                 assert!(errors_match(seq_err, vec_err),
    1321            0 :                         "Mismatched errors: {seq_err} != {vec_err} - keyspace={keyspace:?} lsn={lsn}")},
    1322           12 :             (Ok(seq_values), Ok(vec_values)) => {
    1323          320 :                 seq_values.iter().zip(vec_values.iter()).for_each(|((seq_key, seq_res), (vec_key, vec_res))| {
    1324          320 :                     assert_eq!(seq_key, vec_key);
    1325          320 :                     match (seq_res, vec_res) {
    1326          320 :                         (Ok(seq_blob), Ok(vec_blob)) => {
    1327          320 :                             Self::validate_key_equivalence(seq_key, &keyspace, lsn, seq_blob, vec_blob);
    1328          320 :                         },
    1329            0 :                         (Err(err), Ok(_)) => {
    1330            0 :                             panic!(
    1331            0 :                                 concat!("Sequential get failed with {} for key {}, but vectored get did not",
    1332            0 :                                         " - keyspace={:?} lsn={}"),
    1333            0 :                                 err, seq_key, keyspace, lsn) },
    1334            0 :                         (Ok(_), Err(err)) => {
    1335            0 :                             panic!(
    1336            0 :                                 concat!("Vectored get failed with {} for key {}, but sequential get did not",
    1337            0 :                                         " - keyspace={:?} lsn={}"),
    1338            0 :                                 err, seq_key, keyspace, lsn) },
    1339            0 :                         (Err(_), Err(_)) => {}
    1340              :                     }
    1341          320 :                 })
    1342              :             }
    1343              :         }
    1344           12 :     }
    1345              : 
    1346          320 :     fn validate_key_equivalence(
    1347          320 :         key: &Key,
    1348          320 :         keyspace: &KeySpace,
    1349          320 :         lsn: Lsn,
    1350          320 :         seq: &Bytes,
    1351          320 :         vec: &Bytes,
    1352          320 :     ) {
    1353          320 :         if *key == AUX_FILES_KEY {
    1354              :             // The value reconstruct of AUX_FILES_KEY from records is not deterministic
    1355              :             // since it uses a hash map under the hood. Hence, deserialise both results
    1356              :             // before comparing.
    1357            0 :             let seq_aux_dir_res = AuxFilesDirectory::des(seq);
    1358            0 :             let vec_aux_dir_res = AuxFilesDirectory::des(vec);
    1359            0 :             match (&seq_aux_dir_res, &vec_aux_dir_res) {
    1360            0 :                 (Ok(seq_aux_dir), Ok(vec_aux_dir)) => {
    1361            0 :                     assert_eq!(
    1362              :                         seq_aux_dir, vec_aux_dir,
    1363            0 :                         "Mismatch for key {} - keyspace={:?} lsn={}",
    1364              :                         key, keyspace, lsn
    1365              :                     );
    1366              :                 }
    1367            0 :                 (Err(_), Err(_)) => {}
    1368              :                 _ => {
    1369            0 :                     panic!("Mismatch for {key}: {seq_aux_dir_res:?} != {vec_aux_dir_res:?}");
    1370              :                 }
    1371              :             }
    1372              :         } else {
    1373              :             // All other keys should reconstruct deterministically, so we simply compare the blobs.
    1374          320 :             assert_eq!(
    1375              :                 seq, vec,
    1376            0 :                 "Image mismatch for key {key} - keyspace={keyspace:?} lsn={lsn}"
    1377              :             );
    1378              :         }
    1379          320 :     }
    1380              : 
    1381              :     /// Get last or prev record separately. Same as get_last_record_rlsn().last/prev.
    1382      5078176 :     pub(crate) fn get_last_record_lsn(&self) -> Lsn {
    1383      5078176 :         self.last_record_lsn.load().last
    1384      5078176 :     }
    1385              : 
    1386            0 :     pub(crate) fn get_prev_record_lsn(&self) -> Lsn {
    1387            0 :         self.last_record_lsn.load().prev
    1388            0 :     }
    1389              : 
    1390              :     /// Atomically get both last and prev.
    1391          224 :     pub(crate) fn get_last_record_rlsn(&self) -> RecordLsn {
    1392          224 :         self.last_record_lsn.load()
    1393          224 :     }
    1394              : 
    1395              :     /// Subscribe to callers of wait_lsn(). The value of the channel is None if there are no
    1396              :     /// wait_lsn() calls in progress, and Some(Lsn) if there is an active waiter for wait_lsn().
    1397            0 :     pub(crate) fn subscribe_for_wait_lsn_updates(&self) -> watch::Receiver<Option<Lsn>> {
    1398            0 :         self.last_record_lsn.status_receiver()
    1399            0 :     }
    1400              : 
    1401         1099 :     pub(crate) fn get_disk_consistent_lsn(&self) -> Lsn {
    1402         1099 :         self.disk_consistent_lsn.load()
    1403         1099 :     }
    1404              : 
    1405              :     /// remote_consistent_lsn from the perspective of the tenant's current generation,
    1406              :     /// not validated with control plane yet.
    1407              :     /// See [`Self::get_remote_consistent_lsn_visible`].
    1408            0 :     pub(crate) fn get_remote_consistent_lsn_projected(&self) -> Option<Lsn> {
    1409            0 :         self.remote_client.remote_consistent_lsn_projected()
    1410            0 :     }
    1411              : 
    1412              :     /// remote_consistent_lsn which the tenant is guaranteed not to go backward from,
    1413              :     /// i.e. a value of remote_consistent_lsn_projected which has undergone
    1414              :     /// generation validation in the deletion queue.
    1415            0 :     pub(crate) fn get_remote_consistent_lsn_visible(&self) -> Option<Lsn> {
    1416            0 :         self.remote_client.remote_consistent_lsn_visible()
    1417            0 :     }
    1418              : 
    1419              :     /// The sum of the file size of all historic layers in the layer map.
    1420              :     /// This method makes no distinction between local and remote layers.
    1421              :     /// Hence, the result **does not represent local filesystem usage**.
    1422            0 :     pub(crate) async fn layer_size_sum(&self) -> u64 {
    1423            0 :         let guard = self.layers.read().await;
    1424            0 :         let layer_map = guard.layer_map();
    1425            0 :         let mut size = 0;
    1426            0 :         for l in layer_map.iter_historic_layers() {
    1427            0 :             size += l.file_size();
    1428            0 :         }
    1429            0 :         size
    1430            0 :     }
    1431              : 
    1432            0 :     pub(crate) fn resident_physical_size(&self) -> u64 {
    1433            0 :         self.metrics.resident_physical_size_get()
    1434            0 :     }
    1435              : 
    1436            0 :     pub(crate) fn get_directory_metrics(&self) -> [u64; DirectoryKind::KINDS_NUM] {
    1437            0 :         array::from_fn(|idx| self.directory_metrics[idx].load(AtomicOrdering::Relaxed))
    1438            0 :     }
    1439              : 
    1440              :     ///
    1441              :     /// Wait until WAL has been received and processed up to this LSN.
    1442              :     ///
    1443              :     /// You should call this before any of the other get_* or list_* functions. Calling
    1444              :     /// those functions with an LSN that has been processed yet is an error.
    1445              :     ///
    1446       227444 :     pub(crate) async fn wait_lsn(
    1447       227444 :         &self,
    1448       227444 :         lsn: Lsn,
    1449       227444 :         who_is_waiting: WaitLsnWaiter<'_>,
    1450       227444 :         ctx: &RequestContext, /* Prepare for use by cancellation */
    1451       227444 :     ) -> Result<(), WaitLsnError> {
    1452       227444 :         if self.cancel.is_cancelled() {
    1453            0 :             return Err(WaitLsnError::Shutdown);
    1454       227444 :         } else if !self.is_active() {
    1455            0 :             return Err(WaitLsnError::BadState);
    1456       227444 :         }
    1457       227444 : 
    1458       227444 :         if cfg!(debug_assertions) {
    1459       227444 :             match ctx.task_kind() {
    1460              :                 TaskKind::WalReceiverManager
    1461              :                 | TaskKind::WalReceiverConnectionHandler
    1462              :                 | TaskKind::WalReceiverConnectionPoller => {
    1463            0 :                     let is_myself = match who_is_waiting {
    1464            0 :                         WaitLsnWaiter::Timeline(waiter) => Weak::ptr_eq(&waiter.myself, &self.myself),
    1465            0 :                         WaitLsnWaiter::Tenant | WaitLsnWaiter::PageService => unreachable!("tenant or page_service context are not expected to have task kind {:?}", ctx.task_kind()),
    1466              :                     };
    1467            0 :                     if is_myself {
    1468            0 :                         if let Err(current) = self.last_record_lsn.would_wait_for(lsn) {
    1469              :                             // walingest is the only one that can advance last_record_lsn; it should make sure to never reach here
    1470            0 :                             panic!("this timeline's walingest task is calling wait_lsn({lsn}) but we only have last_record_lsn={current}; would deadlock");
    1471            0 :                         }
    1472            0 :                     } else {
    1473            0 :                         // if another  timeline's  is waiting for us, there's no deadlock risk because
    1474            0 :                         // our walreceiver task can make progress independent of theirs
    1475            0 :                     }
    1476              :                 }
    1477       227444 :                 _ => {}
    1478              :             }
    1479            0 :         }
    1480              : 
    1481       227444 :         let _timer = crate::metrics::WAIT_LSN_TIME.start_timer();
    1482       227444 : 
    1483       227444 :         match self
    1484       227444 :             .last_record_lsn
    1485       227444 :             .wait_for_timeout(lsn, self.conf.wait_lsn_timeout)
    1486            0 :             .await
    1487              :         {
    1488       227444 :             Ok(()) => Ok(()),
    1489            0 :             Err(e) => {
    1490            0 :                 use utils::seqwait::SeqWaitError::*;
    1491            0 :                 match e {
    1492            0 :                     Shutdown => Err(WaitLsnError::Shutdown),
    1493              :                     Timeout => {
    1494              :                         // don't count the time spent waiting for lock below, and also in walreceiver.status(), towards the wait_lsn_time_histo
    1495            0 :                         drop(_timer);
    1496            0 :                         let walreceiver_status = self.walreceiver_status();
    1497            0 :                         Err(WaitLsnError::Timeout(format!(
    1498            0 :                         "Timed out while waiting for WAL record at LSN {} to arrive, last_record_lsn {} disk consistent LSN={}, WalReceiver status: {}",
    1499            0 :                         lsn,
    1500            0 :                         self.get_last_record_lsn(),
    1501            0 :                         self.get_disk_consistent_lsn(),
    1502            0 :                         walreceiver_status,
    1503            0 :                     )))
    1504              :                     }
    1505              :                 }
    1506              :             }
    1507              :         }
    1508       227444 :     }
    1509              : 
    1510            0 :     pub(crate) fn walreceiver_status(&self) -> String {
    1511            0 :         match &*self.walreceiver.lock().unwrap() {
    1512            0 :             None => "stopping or stopped".to_string(),
    1513            0 :             Some(walreceiver) => match walreceiver.status() {
    1514            0 :                 Some(status) => status.to_human_readable_string(),
    1515            0 :                 None => "Not active".to_string(),
    1516              :             },
    1517              :         }
    1518            0 :     }
    1519              : 
    1520              :     /// Check that it is valid to request operations with that lsn.
    1521          228 :     pub(crate) fn check_lsn_is_in_scope(
    1522          228 :         &self,
    1523          228 :         lsn: Lsn,
    1524          228 :         latest_gc_cutoff_lsn: &RcuReadGuard<Lsn>,
    1525          228 :     ) -> anyhow::Result<()> {
    1526          228 :         ensure!(
    1527          228 :             lsn >= **latest_gc_cutoff_lsn,
    1528            4 :             "LSN {} is earlier than latest GC horizon {} (we might've already garbage collected needed data)",
    1529            4 :             lsn,
    1530            4 :             **latest_gc_cutoff_lsn,
    1531              :         );
    1532          224 :         Ok(())
    1533          228 :     }
    1534              : 
    1535              :     /// Flush to disk all data that was written with the put_* functions
    1536         2076 :     #[instrument(skip(self), fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id))]
    1537              :     pub(crate) async fn freeze_and_flush(&self) -> anyhow::Result<()> {
    1538              :         self.freeze_and_flush0().await
    1539              :     }
    1540              : 
    1541              :     // This exists to provide a non-span creating version of `freeze_and_flush` we can call without
    1542              :     // polluting the span hierarchy.
    1543         1038 :     pub(crate) async fn freeze_and_flush0(&self) -> anyhow::Result<()> {
    1544         1038 :         let to_lsn = self.freeze_inmem_layer(false).await;
    1545         1038 :         self.flush_frozen_layers_and_wait(to_lsn).await
    1546         1038 :     }
    1547              : 
    1548              :     // Check if an open ephemeral layer should be closed: this provides
    1549              :     // background enforcement of checkpoint interval if there is no active WAL receiver, to avoid keeping
    1550              :     // an ephemeral layer open forever when idle.  It also freezes layers if the global limit on
    1551              :     // ephemeral layer bytes has been breached.
    1552            0 :     pub(super) async fn maybe_freeze_ephemeral_layer(&self) {
    1553            0 :         let Ok(_write_guard) = self.write_lock.try_lock() else {
    1554              :             // If the write lock is held, there is an active wal receiver: rolling open layers
    1555              :             // is their responsibility while they hold this lock.
    1556            0 :             return;
    1557              :         };
    1558              : 
    1559            0 :         let Ok(layers_guard) = self.layers.try_read() else {
    1560              :             // Don't block if the layer lock is busy
    1561            0 :             return;
    1562              :         };
    1563              : 
    1564            0 :         let Some(open_layer) = &layers_guard.layer_map().open_layer else {
    1565              :             // If there is no open layer, we have no layer freezing to do.  However, we might need to generate
    1566              :             // some updates to disk_consistent_lsn and remote_consistent_lsn, in case we ingested some WAL regions
    1567              :             // that didn't result in writes to this shard.
    1568              : 
    1569              :             // Must not hold the layers lock while waiting for a flush.
    1570            0 :             drop(layers_guard);
    1571            0 : 
    1572            0 :             let last_record_lsn = self.get_last_record_lsn();
    1573            0 :             let disk_consistent_lsn = self.get_disk_consistent_lsn();
    1574            0 :             if last_record_lsn > disk_consistent_lsn {
    1575              :                 // We have no open layer, but disk_consistent_lsn is behind the last record: this indicates
    1576              :                 // we are a sharded tenant and have skipped some WAL
    1577            0 :                 let last_freeze_ts = *self.last_freeze_ts.read().unwrap();
    1578            0 :                 if last_freeze_ts.elapsed() >= self.get_checkpoint_timeout() {
    1579              :                     // Only do this if have been layer-less longer than get_checkpoint_timeout, so that a shard
    1580              :                     // without any data ingested (yet) doesn't write a remote index as soon as it
    1581              :                     // sees its LSN advance: we only do this if we've been layer-less
    1582              :                     // for some time.
    1583            0 :                     tracing::debug!(
    1584            0 :                         "Advancing disk_consistent_lsn past WAL ingest gap {} -> {}",
    1585              :                         disk_consistent_lsn,
    1586              :                         last_record_lsn
    1587              :                     );
    1588              : 
    1589              :                     // The flush loop will update remote consistent LSN as well as disk consistent LSN.
    1590            0 :                     self.flush_frozen_layers_and_wait(last_record_lsn)
    1591            0 :                         .await
    1592            0 :                         .ok();
    1593            0 :                 }
    1594            0 :             }
    1595              : 
    1596            0 :             return;
    1597              :         };
    1598              : 
    1599            0 :         let Some(current_size) = open_layer.try_len() else {
    1600              :             // Unexpected: since we hold the write guard, nobody else should be writing to this layer, so
    1601              :             // read lock to get size should always succeed.
    1602            0 :             tracing::warn!("Lock conflict while reading size of open layer");
    1603            0 :             return;
    1604              :         };
    1605              : 
    1606            0 :         let current_lsn = self.get_last_record_lsn();
    1607              : 
    1608            0 :         let checkpoint_distance_override = open_layer.tick().await;
    1609              : 
    1610            0 :         if let Some(size_override) = checkpoint_distance_override {
    1611            0 :             if current_size > size_override {
    1612              :                 // This is not harmful, but it only happens in relatively rare cases where
    1613              :                 // time-based checkpoints are not happening fast enough to keep the amount of
    1614              :                 // ephemeral data within configured limits.  It's a sign of stress on the system.
    1615            0 :                 tracing::info!("Early-rolling open layer at size {current_size} (limit {size_override}) due to dirty data pressure");
    1616            0 :             }
    1617            0 :         }
    1618              : 
    1619            0 :         let checkpoint_distance =
    1620            0 :             checkpoint_distance_override.unwrap_or(self.get_checkpoint_distance());
    1621            0 : 
    1622            0 :         if self.should_roll(
    1623            0 :             current_size,
    1624            0 :             current_size,
    1625            0 :             checkpoint_distance,
    1626            0 :             self.get_last_record_lsn(),
    1627            0 :             self.last_freeze_at.load(),
    1628            0 :             open_layer.get_opened_at(),
    1629            0 :         ) {
    1630            0 :             match open_layer.info() {
    1631            0 :                 InMemoryLayerInfo::Frozen { lsn_start, lsn_end } => {
    1632            0 :                     // We may reach this point if the layer was already frozen by not yet flushed: flushing
    1633            0 :                     // happens asynchronously in the background.
    1634            0 :                     tracing::debug!(
    1635            0 :                         "Not freezing open layer, it's already frozen ({lsn_start}..{lsn_end})"
    1636              :                     );
    1637              :                 }
    1638              :                 InMemoryLayerInfo::Open { .. } => {
    1639              :                     // Upgrade to a write lock and freeze the layer
    1640            0 :                     drop(layers_guard);
    1641            0 :                     let mut layers_guard = self.layers.write().await;
    1642            0 :                     layers_guard
    1643            0 :                         .try_freeze_in_memory_layer(current_lsn, &self.last_freeze_at)
    1644            0 :                         .await;
    1645              :                 }
    1646              :             }
    1647            0 :             self.flush_frozen_layers();
    1648            0 :         }
    1649            0 :     }
    1650              : 
    1651              :     /// Outermost timeline compaction operation; downloads needed layers.
    1652          366 :     pub(crate) async fn compact(
    1653          366 :         self: &Arc<Self>,
    1654          366 :         cancel: &CancellationToken,
    1655          366 :         flags: EnumSet<CompactFlags>,
    1656          366 :         ctx: &RequestContext,
    1657          366 :     ) -> Result<(), CompactionError> {
    1658          366 :         // most likely the cancellation token is from background task, but in tests it could be the
    1659          366 :         // request task as well.
    1660          366 : 
    1661          366 :         let prepare = async move {
    1662          366 :             let guard = self.compaction_lock.lock().await;
    1663              : 
    1664          366 :             let permit = super::tasks::concurrent_background_tasks_rate_limit_permit(
    1665          366 :                 BackgroundLoopKind::Compaction,
    1666          366 :                 ctx,
    1667          366 :             )
    1668            0 :             .await;
    1669              : 
    1670          366 :             (guard, permit)
    1671          366 :         };
    1672              : 
    1673              :         // this wait probably never needs any "long time spent" logging, because we already nag if
    1674              :         // compaction task goes over it's period (20s) which is quite often in production.
    1675          366 :         let (_guard, _permit) = tokio::select! {
    1676              :             tuple = prepare => { tuple },
    1677              :             _ = self.cancel.cancelled() => return Ok(()),
    1678              :             _ = cancel.cancelled() => return Ok(()),
    1679              :         };
    1680              : 
    1681          366 :         let last_record_lsn = self.get_last_record_lsn();
    1682          366 : 
    1683          366 :         // Last record Lsn could be zero in case the timeline was just created
    1684          366 :         if !last_record_lsn.is_valid() {
    1685            0 :             warn!("Skipping compaction for potentially just initialized timeline, it has invalid last record lsn: {last_record_lsn}");
    1686            0 :             return Ok(());
    1687          366 :         }
    1688          366 : 
    1689          366 :         match self.get_compaction_algorithm() {
    1690            0 :             CompactionAlgorithm::Tiered => self.compact_tiered(cancel, ctx).await,
    1691        70424 :             CompactionAlgorithm::Legacy => self.compact_legacy(cancel, flags, ctx).await,
    1692              :         }
    1693          366 :     }
    1694              : 
    1695              :     /// Mutate the timeline with a [`TimelineWriter`].
    1696      5133116 :     pub(crate) async fn writer(&self) -> TimelineWriter<'_> {
    1697      5133116 :         TimelineWriter {
    1698      5133116 :             tl: self,
    1699      5133116 :             write_guard: self.write_lock.lock().await,
    1700              :         }
    1701      5133116 :     }
    1702              : 
    1703            0 :     pub(crate) fn activate(
    1704            0 :         self: &Arc<Self>,
    1705            0 :         parent: Arc<crate::tenant::Tenant>,
    1706            0 :         broker_client: BrokerClientChannel,
    1707            0 :         background_jobs_can_start: Option<&completion::Barrier>,
    1708            0 :         ctx: &RequestContext,
    1709            0 :     ) {
    1710            0 :         if self.tenant_shard_id.is_shard_zero() {
    1711            0 :             // Logical size is only maintained accurately on shard zero.
    1712            0 :             self.spawn_initial_logical_size_computation_task(ctx);
    1713            0 :         }
    1714            0 :         self.launch_wal_receiver(ctx, broker_client);
    1715            0 :         self.set_state(TimelineState::Active);
    1716            0 :         self.launch_eviction_task(parent, background_jobs_can_start);
    1717            0 :     }
    1718              : 
    1719              :     /// After this function returns, there are no timeline-scoped tasks are left running.
    1720              :     ///
    1721              :     /// The preferred pattern for is:
    1722              :     /// - in any spawned tasks, keep Timeline::guard open + Timeline::cancel / child token
    1723              :     /// - if early shutdown (not just cancellation) of a sub-tree of tasks is required,
    1724              :     ///   go the extra mile and keep track of JoinHandles
    1725              :     /// - Keep track of JoinHandles using a passed-down `Arc<Mutex<Option<JoinSet>>>` or similar,
    1726              :     ///   instead of spawning directly on a runtime. It is a more composable / testable pattern.
    1727              :     ///
    1728              :     /// For legacy reasons, we still have multiple tasks spawned using
    1729              :     /// `task_mgr::spawn(X, Some(tenant_id), Some(timeline_id))`.
    1730              :     /// We refer to these as "timeline-scoped task_mgr tasks".
    1731              :     /// Some of these tasks are already sensitive to Timeline::cancel while others are
    1732              :     /// not sensitive to Timeline::cancel and instead respect [`task_mgr::shutdown_token`]
    1733              :     /// or [`task_mgr::shutdown_watcher`].
    1734              :     /// We want to gradually convert the code base away from these.
    1735              :     ///
    1736              :     /// Here is an inventory of timeline-scoped task_mgr tasks that are still sensitive to
    1737              :     /// `task_mgr::shutdown_{token,watcher}` (there are also tenant-scoped and global-scoped
    1738              :     /// ones that aren't mentioned here):
    1739              :     /// - [`TaskKind::TimelineDeletionWorker`]
    1740              :     ///    - NB: also used for tenant deletion
    1741              :     /// - [`TaskKind::RemoteUploadTask`]`
    1742              :     /// - [`TaskKind::InitialLogicalSizeCalculation`]
    1743              :     /// - [`TaskKind::DownloadAllRemoteLayers`] (can we get rid of it?)
    1744              :     // Inventory of timeline-scoped task_mgr tasks that use spawn but aren't sensitive:
    1745              :     /// - [`TaskKind::Eviction`]
    1746              :     /// - [`TaskKind::LayerFlushTask`]
    1747              :     /// - [`TaskKind::OndemandLogicalSizeCalculation`]
    1748              :     /// - [`TaskKind::GarbageCollector`] (immediate_gc is timeline-scoped)
    1749            8 :     pub(crate) async fn shutdown(&self, mode: ShutdownMode) {
    1750            8 :         debug_assert_current_span_has_tenant_and_timeline_id();
    1751              : 
    1752            8 :         let try_freeze_and_flush = match mode {
    1753            6 :             ShutdownMode::FreezeAndFlush => true,
    1754            2 :             ShutdownMode::Hard => false,
    1755              :         };
    1756              : 
    1757              :         // Regardless of whether we're going to try_freeze_and_flush
    1758              :         // or not, stop ingesting any more data. Walreceiver only provides
    1759              :         // cancellation but no "wait until gone", because it uses the Timeline::gate.
    1760              :         // So, only after the self.gate.close() below will we know for sure that
    1761              :         // no walreceiver tasks are left.
    1762              :         // For `try_freeze_and_flush=true`, this means that we might still be ingesting
    1763              :         // data during the call to `self.freeze_and_flush()` below.
    1764              :         // That's not ideal, but, we don't have the concept of a ChildGuard,
    1765              :         // which is what we'd need to properly model early shutdown of the walreceiver
    1766              :         // task sub-tree before the other Timeline task sub-trees.
    1767            8 :         let walreceiver = self.walreceiver.lock().unwrap().take();
    1768            8 :         tracing::debug!(
    1769            0 :             is_some = walreceiver.is_some(),
    1770            0 :             "Waiting for WalReceiverManager..."
    1771              :         );
    1772            8 :         if let Some(walreceiver) = walreceiver {
    1773            0 :             walreceiver.cancel();
    1774            8 :         }
    1775              :         // ... and inform any waiters for newer LSNs that there won't be any.
    1776            8 :         self.last_record_lsn.shutdown();
    1777            8 : 
    1778            8 :         if try_freeze_and_flush {
    1779              :             // we shut down walreceiver above, so, we won't add anything more
    1780              :             // to the InMemoryLayer; freeze it and wait for all frozen layers
    1781              :             // to reach the disk & upload queue, then shut the upload queue and
    1782              :             // wait for it to drain.
    1783            6 :             match self.freeze_and_flush().await {
    1784              :                 Ok(_) => {
    1785              :                     // drain the upload queue
    1786              :                     // if we did not wait for completion here, it might be our shutdown process
    1787              :                     // didn't wait for remote uploads to complete at all, as new tasks can forever
    1788              :                     // be spawned.
    1789              :                     //
    1790              :                     // what is problematic is the shutting down of RemoteTimelineClient, because
    1791              :                     // obviously it does not make sense to stop while we wait for it, but what
    1792              :                     // about corner cases like s3 suddenly hanging up?
    1793            6 :                     self.remote_client.shutdown().await;
    1794              :                 }
    1795            0 :                 Err(e) => {
    1796            0 :                     // Non-fatal.  Shutdown is infallible.  Failures to flush just mean that
    1797            0 :                     // we have some extra WAL replay to do next time the timeline starts.
    1798            0 :                     warn!("failed to freeze and flush: {e:#}");
    1799              :                 }
    1800              :             }
    1801            2 :         }
    1802              : 
    1803              :         // Signal any subscribers to our cancellation token to drop out
    1804            8 :         tracing::debug!("Cancelling CancellationToken");
    1805            8 :         self.cancel.cancel();
    1806            8 : 
    1807            8 :         // Transition the remote_client into a state where it's only useful for timeline deletion.
    1808            8 :         // (The deletion use case is why we can't just hook up remote_client to Self::cancel).)
    1809            8 :         self.remote_client.stop();
    1810            8 :         // As documented in remote_client.stop()'s doc comment, it's our responsibility
    1811            8 :         // to shut down the upload queue tasks.
    1812            8 :         // TODO: fix that, task management should be encapsulated inside remote_client.
    1813            8 :         task_mgr::shutdown_tasks(
    1814            8 :             Some(TaskKind::RemoteUploadTask),
    1815            8 :             Some(self.tenant_shard_id),
    1816            8 :             Some(self.timeline_id),
    1817            8 :         )
    1818            0 :         .await;
    1819              : 
    1820              :         // TODO: work toward making this a no-op. See this funciton's doc comment for more context.
    1821            8 :         tracing::debug!("Waiting for tasks...");
    1822            8 :         task_mgr::shutdown_tasks(None, Some(self.tenant_shard_id), Some(self.timeline_id)).await;
    1823              : 
    1824              :         // Finally wait until any gate-holders are complete.
    1825              :         //
    1826              :         // TODO: once above shutdown_tasks is a no-op, we can close the gate before calling shutdown_tasks
    1827              :         // and use a TBD variant of shutdown_tasks that asserts that there were no tasks left.
    1828            8 :         self.gate.close().await;
    1829              : 
    1830            8 :         self.metrics.shutdown();
    1831            8 :     }
    1832              : 
    1833          352 :     pub(crate) fn set_state(&self, new_state: TimelineState) {
    1834          352 :         match (self.current_state(), new_state) {
    1835          352 :             (equal_state_1, equal_state_2) if equal_state_1 == equal_state_2 => {
    1836            2 :                 info!("Ignoring new state, equal to the existing one: {equal_state_2:?}");
    1837              :             }
    1838            0 :             (st, TimelineState::Loading) => {
    1839            0 :                 error!("ignoring transition from {st:?} into Loading state");
    1840              :             }
    1841            0 :             (TimelineState::Broken { .. }, new_state) => {
    1842            0 :                 error!("Ignoring state update {new_state:?} for broken timeline");
    1843              :             }
    1844              :             (TimelineState::Stopping, TimelineState::Active) => {
    1845            0 :                 error!("Not activating a Stopping timeline");
    1846              :             }
    1847          350 :             (_, new_state) => {
    1848          350 :                 self.state.send_replace(new_state);
    1849          350 :             }
    1850              :         }
    1851          352 :     }
    1852              : 
    1853            2 :     pub(crate) fn set_broken(&self, reason: String) {
    1854            2 :         let backtrace_str: String = format!("{}", std::backtrace::Backtrace::force_capture());
    1855            2 :         let broken_state = TimelineState::Broken {
    1856            2 :             reason,
    1857            2 :             backtrace: backtrace_str,
    1858            2 :         };
    1859            2 :         self.set_state(broken_state);
    1860            2 : 
    1861            2 :         // Although the Broken state is not equivalent to shutdown() (shutdown will be called
    1862            2 :         // later when this tenant is detach or the process shuts down), firing the cancellation token
    1863            2 :         // here avoids the need for other tasks to watch for the Broken state explicitly.
    1864            2 :         self.cancel.cancel();
    1865            2 :     }
    1866              : 
    1867       229137 :     pub(crate) fn current_state(&self) -> TimelineState {
    1868       229137 :         self.state.borrow().clone()
    1869       229137 :     }
    1870              : 
    1871            6 :     pub(crate) fn is_broken(&self) -> bool {
    1872            6 :         matches!(&*self.state.borrow(), TimelineState::Broken { .. })
    1873            6 :     }
    1874              : 
    1875       227664 :     pub(crate) fn is_active(&self) -> bool {
    1876       227664 :         self.current_state() == TimelineState::Active
    1877       227664 :     }
    1878              : 
    1879         1121 :     pub(crate) fn is_stopping(&self) -> bool {
    1880         1121 :         self.current_state() == TimelineState::Stopping
    1881         1121 :     }
    1882              : 
    1883            0 :     pub(crate) fn subscribe_for_state_updates(&self) -> watch::Receiver<TimelineState> {
    1884            0 :         self.state.subscribe()
    1885            0 :     }
    1886              : 
    1887       227446 :     pub(crate) async fn wait_to_become_active(
    1888       227446 :         &self,
    1889       227446 :         _ctx: &RequestContext, // Prepare for use by cancellation
    1890       227446 :     ) -> Result<(), TimelineState> {
    1891       227446 :         let mut receiver = self.state.subscribe();
    1892       227446 :         loop {
    1893       227446 :             let current_state = receiver.borrow().clone();
    1894       227446 :             match current_state {
    1895              :                 TimelineState::Loading => {
    1896            0 :                     receiver
    1897            0 :                         .changed()
    1898            0 :                         .await
    1899            0 :                         .expect("holding a reference to self");
    1900              :                 }
    1901              :                 TimelineState::Active { .. } => {
    1902       227444 :                     return Ok(());
    1903              :                 }
    1904              :                 TimelineState::Broken { .. } | TimelineState::Stopping => {
    1905              :                     // There's no chance the timeline can transition back into ::Active
    1906            2 :                     return Err(current_state);
    1907              :                 }
    1908              :             }
    1909              :         }
    1910       227446 :     }
    1911              : 
    1912            0 :     pub(crate) async fn layer_map_info(&self, reset: LayerAccessStatsReset) -> LayerMapInfo {
    1913            0 :         let guard = self.layers.read().await;
    1914            0 :         let layer_map = guard.layer_map();
    1915            0 :         let mut in_memory_layers = Vec::with_capacity(layer_map.frozen_layers.len() + 1);
    1916            0 :         if let Some(open_layer) = &layer_map.open_layer {
    1917            0 :             in_memory_layers.push(open_layer.info());
    1918            0 :         }
    1919            0 :         for frozen_layer in &layer_map.frozen_layers {
    1920            0 :             in_memory_layers.push(frozen_layer.info());
    1921            0 :         }
    1922              : 
    1923            0 :         let mut historic_layers = Vec::new();
    1924            0 :         for historic_layer in layer_map.iter_historic_layers() {
    1925            0 :             let historic_layer = guard.get_from_desc(&historic_layer);
    1926            0 :             historic_layers.push(historic_layer.info(reset));
    1927            0 :         }
    1928              : 
    1929            0 :         LayerMapInfo {
    1930            0 :             in_memory_layers,
    1931            0 :             historic_layers,
    1932            0 :         }
    1933            0 :     }
    1934              : 
    1935            0 :     #[instrument(skip_all, fields(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))]
    1936              :     pub(crate) async fn download_layer(
    1937              :         &self,
    1938              :         layer_file_name: &LayerName,
    1939              :     ) -> anyhow::Result<Option<bool>> {
    1940              :         let Some(layer) = self.find_layer(layer_file_name).await else {
    1941              :             return Ok(None);
    1942              :         };
    1943              : 
    1944              :         layer.download().await?;
    1945              : 
    1946              :         Ok(Some(true))
    1947              :     }
    1948              : 
    1949              :     /// Evict just one layer.
    1950              :     ///
    1951              :     /// Returns `Ok(None)` in the case where the layer could not be found by its `layer_file_name`.
    1952            0 :     pub(crate) async fn evict_layer(
    1953            0 :         &self,
    1954            0 :         layer_file_name: &LayerName,
    1955            0 :     ) -> anyhow::Result<Option<bool>> {
    1956            0 :         let _gate = self
    1957            0 :             .gate
    1958            0 :             .enter()
    1959            0 :             .map_err(|_| anyhow::anyhow!("Shutting down"))?;
    1960              : 
    1961            0 :         let Some(local_layer) = self.find_layer(layer_file_name).await else {
    1962            0 :             return Ok(None);
    1963              :         };
    1964              : 
    1965              :         // curl has this by default
    1966            0 :         let timeout = std::time::Duration::from_secs(120);
    1967            0 : 
    1968            0 :         match local_layer.evict_and_wait(timeout).await {
    1969            0 :             Ok(()) => Ok(Some(true)),
    1970            0 :             Err(EvictionError::NotFound) => Ok(Some(false)),
    1971            0 :             Err(EvictionError::Downloaded) => Ok(Some(false)),
    1972            0 :             Err(EvictionError::Timeout) => Ok(Some(false)),
    1973              :         }
    1974            0 :     }
    1975              : 
    1976           58 :     fn should_roll(
    1977           58 :         &self,
    1978           58 :         layer_size: u64,
    1979           58 :         projected_layer_size: u64,
    1980           58 :         checkpoint_distance: u64,
    1981           58 :         projected_lsn: Lsn,
    1982           58 :         last_freeze_at: Lsn,
    1983           58 :         opened_at: Instant,
    1984           58 :     ) -> bool {
    1985           58 :         let distance = projected_lsn.widening_sub(last_freeze_at);
    1986           58 : 
    1987           58 :         // Rolling the open layer can be triggered by:
    1988           58 :         // 1. The distance from the last LSN we rolled at. This bounds the amount of WAL that
    1989           58 :         //    the safekeepers need to store.  For sharded tenants, we multiply by shard count to
    1990           58 :         //    account for how writes are distributed across shards: we expect each node to consume
    1991           58 :         //    1/count of the LSN on average.
    1992           58 :         // 2. The size of the currently open layer.
    1993           58 :         // 3. The time since the last roll. It helps safekeepers to regard pageserver as caught
    1994           58 :         //    up and suspend activity.
    1995           58 :         if distance >= checkpoint_distance as i128 * self.shard_identity.count.count() as i128 {
    1996            0 :             info!(
    1997            0 :                 "Will roll layer at {} with layer size {} due to LSN distance ({})",
    1998              :                 projected_lsn, layer_size, distance
    1999              :             );
    2000              : 
    2001            0 :             true
    2002           58 :         } else if projected_layer_size >= checkpoint_distance {
    2003            0 :             info!(
    2004            0 :                 "Will roll layer at {} with layer size {} due to layer size ({})",
    2005              :                 projected_lsn, layer_size, projected_layer_size
    2006              :             );
    2007              : 
    2008            0 :             true
    2009           58 :         } else if distance > 0 && opened_at.elapsed() >= self.get_checkpoint_timeout() {
    2010            0 :             info!(
    2011            0 :                     "Will roll layer at {} with layer size {} due to time since first write to the layer ({:?})",
    2012            0 :                     projected_lsn,
    2013            0 :                     layer_size,
    2014            0 :                     opened_at.elapsed()
    2015              :                 );
    2016              : 
    2017            0 :             true
    2018              :         } else {
    2019           58 :             false
    2020              :         }
    2021           58 :     }
    2022              : }
    2023              : 
    2024              : /// Number of times we will compute partition within a checkpoint distance.
    2025              : const REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE: u64 = 10;
    2026              : 
    2027              : // Private functions
    2028              : impl Timeline {
    2029          166 :     pub(crate) fn get_switch_aux_file_policy(&self) -> AuxFilePolicy {
    2030          166 :         let tenant_conf = self.tenant_conf.load();
    2031          166 :         tenant_conf
    2032          166 :             .tenant_conf
    2033          166 :             .switch_aux_file_policy
    2034          166 :             .unwrap_or(self.conf.default_tenant_conf.switch_aux_file_policy)
    2035          166 :     }
    2036              : 
    2037            0 :     pub(crate) fn get_lazy_slru_download(&self) -> bool {
    2038            0 :         let tenant_conf = self.tenant_conf.load();
    2039            0 :         tenant_conf
    2040            0 :             .tenant_conf
    2041            0 :             .lazy_slru_download
    2042            0 :             .unwrap_or(self.conf.default_tenant_conf.lazy_slru_download)
    2043            0 :     }
    2044              : 
    2045         1256 :     fn get_checkpoint_distance(&self) -> u64 {
    2046         1256 :         let tenant_conf = self.tenant_conf.load();
    2047         1256 :         tenant_conf
    2048         1256 :             .tenant_conf
    2049         1256 :             .checkpoint_distance
    2050         1256 :             .unwrap_or(self.conf.default_tenant_conf.checkpoint_distance)
    2051         1256 :     }
    2052              : 
    2053           58 :     fn get_checkpoint_timeout(&self) -> Duration {
    2054           58 :         let tenant_conf = self.tenant_conf.load();
    2055           58 :         tenant_conf
    2056           58 :             .tenant_conf
    2057           58 :             .checkpoint_timeout
    2058           58 :             .unwrap_or(self.conf.default_tenant_conf.checkpoint_timeout)
    2059           58 :     }
    2060              : 
    2061          480 :     fn get_compaction_target_size(&self) -> u64 {
    2062          480 :         let tenant_conf = self.tenant_conf.load();
    2063          480 :         tenant_conf
    2064          480 :             .tenant_conf
    2065          480 :             .compaction_target_size
    2066          480 :             .unwrap_or(self.conf.default_tenant_conf.compaction_target_size)
    2067          480 :     }
    2068              : 
    2069          366 :     fn get_compaction_threshold(&self) -> usize {
    2070          366 :         let tenant_conf = self.tenant_conf.load();
    2071          366 :         tenant_conf
    2072          366 :             .tenant_conf
    2073          366 :             .compaction_threshold
    2074          366 :             .unwrap_or(self.conf.default_tenant_conf.compaction_threshold)
    2075          366 :     }
    2076              : 
    2077           14 :     fn get_image_creation_threshold(&self) -> usize {
    2078           14 :         let tenant_conf = self.tenant_conf.load();
    2079           14 :         tenant_conf
    2080           14 :             .tenant_conf
    2081           14 :             .image_creation_threshold
    2082           14 :             .unwrap_or(self.conf.default_tenant_conf.image_creation_threshold)
    2083           14 :     }
    2084              : 
    2085          366 :     fn get_compaction_algorithm(&self) -> CompactionAlgorithm {
    2086          366 :         let tenant_conf = &self.tenant_conf.load();
    2087          366 :         tenant_conf
    2088          366 :             .tenant_conf
    2089          366 :             .compaction_algorithm
    2090          366 :             .unwrap_or(self.conf.default_tenant_conf.compaction_algorithm)
    2091          366 :     }
    2092              : 
    2093            0 :     fn get_eviction_policy(&self) -> EvictionPolicy {
    2094            0 :         let tenant_conf = self.tenant_conf.load();
    2095            0 :         tenant_conf
    2096            0 :             .tenant_conf
    2097            0 :             .eviction_policy
    2098            0 :             .unwrap_or(self.conf.default_tenant_conf.eviction_policy)
    2099            0 :     }
    2100              : 
    2101          360 :     fn get_evictions_low_residence_duration_metric_threshold(
    2102          360 :         tenant_conf: &TenantConfOpt,
    2103          360 :         default_tenant_conf: &TenantConf,
    2104          360 :     ) -> Duration {
    2105          360 :         tenant_conf
    2106          360 :             .evictions_low_residence_duration_metric_threshold
    2107          360 :             .unwrap_or(default_tenant_conf.evictions_low_residence_duration_metric_threshold)
    2108          360 :     }
    2109              : 
    2110          480 :     fn get_image_layer_creation_check_threshold(&self) -> u8 {
    2111          480 :         let tenant_conf = self.tenant_conf.load();
    2112          480 :         tenant_conf
    2113          480 :             .tenant_conf
    2114          480 :             .image_layer_creation_check_threshold
    2115          480 :             .unwrap_or(
    2116          480 :                 self.conf
    2117          480 :                     .default_tenant_conf
    2118          480 :                     .image_layer_creation_check_threshold,
    2119          480 :             )
    2120          480 :     }
    2121              : 
    2122            8 :     pub(super) fn tenant_conf_updated(&self, new_conf: &TenantConfOpt) {
    2123            8 :         // NB: Most tenant conf options are read by background loops, so,
    2124            8 :         // changes will automatically be picked up.
    2125            8 : 
    2126            8 :         // The threshold is embedded in the metric. So, we need to update it.
    2127            8 :         {
    2128            8 :             let new_threshold = Self::get_evictions_low_residence_duration_metric_threshold(
    2129            8 :                 new_conf,
    2130            8 :                 &self.conf.default_tenant_conf,
    2131            8 :             );
    2132            8 : 
    2133            8 :             let tenant_id_str = self.tenant_shard_id.tenant_id.to_string();
    2134            8 :             let shard_id_str = format!("{}", self.tenant_shard_id.shard_slug());
    2135            8 : 
    2136            8 :             let timeline_id_str = self.timeline_id.to_string();
    2137            8 :             self.metrics
    2138            8 :                 .evictions_with_low_residence_duration
    2139            8 :                 .write()
    2140            8 :                 .unwrap()
    2141            8 :                 .change_threshold(
    2142            8 :                     &tenant_id_str,
    2143            8 :                     &shard_id_str,
    2144            8 :                     &timeline_id_str,
    2145            8 :                     new_threshold,
    2146            8 :                 );
    2147            8 :         }
    2148            8 :     }
    2149              : 
    2150              :     /// Open a Timeline handle.
    2151              :     ///
    2152              :     /// Loads the metadata for the timeline into memory, but not the layer map.
    2153              :     #[allow(clippy::too_many_arguments)]
    2154          352 :     pub(super) fn new(
    2155          352 :         conf: &'static PageServerConf,
    2156          352 :         tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
    2157          352 :         metadata: &TimelineMetadata,
    2158          352 :         ancestor: Option<Arc<Timeline>>,
    2159          352 :         timeline_id: TimelineId,
    2160          352 :         tenant_shard_id: TenantShardId,
    2161          352 :         generation: Generation,
    2162          352 :         shard_identity: ShardIdentity,
    2163          352 :         walredo_mgr: Option<Arc<super::WalRedoManager>>,
    2164          352 :         resources: TimelineResources,
    2165          352 :         pg_version: u32,
    2166          352 :         state: TimelineState,
    2167          352 :         aux_file_policy: Option<AuxFilePolicy>,
    2168          352 :         cancel: CancellationToken,
    2169          352 :     ) -> Arc<Self> {
    2170          352 :         let disk_consistent_lsn = metadata.disk_consistent_lsn();
    2171          352 :         let (state, _) = watch::channel(state);
    2172          352 : 
    2173          352 :         let (layer_flush_start_tx, _) = tokio::sync::watch::channel((0, disk_consistent_lsn));
    2174          352 :         let (layer_flush_done_tx, _) = tokio::sync::watch::channel((0, Ok(())));
    2175          352 : 
    2176          352 :         let evictions_low_residence_duration_metric_threshold = {
    2177          352 :             let loaded_tenant_conf = tenant_conf.load();
    2178          352 :             Self::get_evictions_low_residence_duration_metric_threshold(
    2179          352 :                 &loaded_tenant_conf.tenant_conf,
    2180          352 :                 &conf.default_tenant_conf,
    2181          352 :             )
    2182          352 :         };
    2183          352 : 
    2184          352 :         Arc::new_cyclic(|myself| {
    2185          352 :             let metrics = TimelineMetrics::new(
    2186          352 :                 &tenant_shard_id,
    2187          352 :                 &timeline_id,
    2188          352 :                 crate::metrics::EvictionsWithLowResidenceDurationBuilder::new(
    2189          352 :                     "mtime",
    2190          352 :                     evictions_low_residence_duration_metric_threshold,
    2191          352 :                 ),
    2192          352 :             );
    2193          352 :             let aux_file_metrics = metrics.aux_file_size_gauge.clone();
    2194              : 
    2195          352 :             let mut result = Timeline {
    2196          352 :                 conf,
    2197          352 :                 tenant_conf,
    2198          352 :                 myself: myself.clone(),
    2199          352 :                 timeline_id,
    2200          352 :                 tenant_shard_id,
    2201          352 :                 generation,
    2202          352 :                 shard_identity,
    2203          352 :                 pg_version,
    2204          352 :                 layers: Default::default(),
    2205          352 : 
    2206          352 :                 walredo_mgr,
    2207          352 :                 walreceiver: Mutex::new(None),
    2208          352 : 
    2209          352 :                 remote_client: Arc::new(resources.remote_client),
    2210          352 : 
    2211          352 :                 // initialize in-memory 'last_record_lsn' from 'disk_consistent_lsn'.
    2212          352 :                 last_record_lsn: SeqWait::new(RecordLsn {
    2213          352 :                     last: disk_consistent_lsn,
    2214          352 :                     prev: metadata.prev_record_lsn().unwrap_or(Lsn(0)),
    2215          352 :                 }),
    2216          352 :                 disk_consistent_lsn: AtomicLsn::new(disk_consistent_lsn.0),
    2217          352 : 
    2218          352 :                 last_freeze_at: AtomicLsn::new(disk_consistent_lsn.0),
    2219          352 :                 last_freeze_ts: RwLock::new(Instant::now()),
    2220          352 : 
    2221          352 :                 loaded_at: (disk_consistent_lsn, SystemTime::now()),
    2222          352 : 
    2223          352 :                 ancestor_timeline: ancestor,
    2224          352 :                 ancestor_lsn: metadata.ancestor_lsn(),
    2225          352 : 
    2226          352 :                 metrics,
    2227          352 : 
    2228          352 :                 query_metrics: crate::metrics::SmgrQueryTimePerTimeline::new(
    2229          352 :                     &tenant_shard_id,
    2230          352 :                     &timeline_id,
    2231          352 :                 ),
    2232          352 : 
    2233         2464 :                 directory_metrics: array::from_fn(|_| AtomicU64::new(0)),
    2234          352 : 
    2235          352 :                 flush_loop_state: Mutex::new(FlushLoopState::NotStarted),
    2236          352 : 
    2237          352 :                 layer_flush_start_tx,
    2238          352 :                 layer_flush_done_tx,
    2239          352 : 
    2240          352 :                 write_lock: tokio::sync::Mutex::new(None),
    2241          352 : 
    2242          352 :                 gc_info: std::sync::RwLock::new(GcInfo::default()),
    2243          352 : 
    2244          352 :                 latest_gc_cutoff_lsn: Rcu::new(metadata.latest_gc_cutoff_lsn()),
    2245          352 :                 initdb_lsn: metadata.initdb_lsn(),
    2246          352 : 
    2247          352 :                 current_logical_size: if disk_consistent_lsn.is_valid() {
    2248              :                     // we're creating timeline data with some layer files existing locally,
    2249              :                     // need to recalculate timeline's logical size based on data in the layers.
    2250          230 :                     LogicalSize::deferred_initial(disk_consistent_lsn)
    2251              :                 } else {
    2252              :                     // we're creating timeline data without any layers existing locally,
    2253              :                     // initial logical size is 0.
    2254          122 :                     LogicalSize::empty_initial()
    2255              :                 },
    2256          352 :                 partitioning: tokio::sync::Mutex::new((
    2257          352 :                     (KeyPartitioning::new(), KeyPartitioning::new().into_sparse()),
    2258          352 :                     Lsn(0),
    2259          352 :                 )),
    2260          352 :                 repartition_threshold: 0,
    2261          352 :                 last_image_layer_creation_check_at: AtomicLsn::new(0),
    2262          352 : 
    2263          352 :                 last_received_wal: Mutex::new(None),
    2264          352 :                 rel_size_cache: RwLock::new(RelSizeCache {
    2265          352 :                     complete_as_of: disk_consistent_lsn,
    2266          352 :                     map: HashMap::new(),
    2267          352 :                 }),
    2268          352 : 
    2269          352 :                 download_all_remote_layers_task_info: RwLock::new(None),
    2270          352 : 
    2271          352 :                 state,
    2272          352 : 
    2273          352 :                 eviction_task_timeline_state: tokio::sync::Mutex::new(
    2274          352 :                     EvictionTaskTimelineState::default(),
    2275          352 :                 ),
    2276          352 :                 delete_progress: Arc::new(tokio::sync::Mutex::new(DeleteTimelineFlow::default())),
    2277          352 : 
    2278          352 :                 cancel,
    2279          352 :                 gate: Gate::default(),
    2280          352 : 
    2281          352 :                 compaction_lock: tokio::sync::Mutex::default(),
    2282          352 :                 gc_lock: tokio::sync::Mutex::default(),
    2283          352 : 
    2284          352 :                 standby_horizon: AtomicLsn::new(0),
    2285          352 : 
    2286          352 :                 timeline_get_throttle: resources.timeline_get_throttle,
    2287          352 : 
    2288          352 :                 aux_files: tokio::sync::Mutex::new(AuxFilesState {
    2289          352 :                     dir: None,
    2290          352 :                     n_deltas: 0,
    2291          352 :                 }),
    2292          352 : 
    2293          352 :                 aux_file_size_estimator: AuxFileSizeEstimator::new(aux_file_metrics),
    2294          352 : 
    2295          352 :                 last_aux_file_policy: AtomicAuxFilePolicy::new(aux_file_policy),
    2296          352 :             };
    2297          352 :             result.repartition_threshold =
    2298          352 :                 result.get_checkpoint_distance() / REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE;
    2299          352 : 
    2300          352 :             result
    2301          352 :                 .metrics
    2302          352 :                 .last_record_gauge
    2303          352 :                 .set(disk_consistent_lsn.0 as i64);
    2304          352 :             result
    2305          352 :         })
    2306          352 :     }
    2307              : 
    2308          460 :     pub(super) fn maybe_spawn_flush_loop(self: &Arc<Self>) {
    2309          460 :         let Ok(guard) = self.gate.enter() else {
    2310            0 :             info!("cannot start flush loop when the timeline gate has already been closed");
    2311            0 :             return;
    2312              :         };
    2313          460 :         let mut flush_loop_state = self.flush_loop_state.lock().unwrap();
    2314          460 :         match *flush_loop_state {
    2315          346 :             FlushLoopState::NotStarted => (),
    2316              :             FlushLoopState::Running { .. } => {
    2317          114 :                 info!(
    2318            0 :                     "skipping attempt to start flush_loop twice {}/{}",
    2319            0 :                     self.tenant_shard_id, self.timeline_id
    2320              :                 );
    2321          114 :                 return;
    2322              :             }
    2323              :             FlushLoopState::Exited => {
    2324            0 :                 warn!(
    2325            0 :                     "ignoring attempt to restart exited flush_loop {}/{}",
    2326            0 :                     self.tenant_shard_id, self.timeline_id
    2327              :                 );
    2328            0 :                 return;
    2329              :             }
    2330              :         }
    2331              : 
    2332          346 :         let layer_flush_start_rx = self.layer_flush_start_tx.subscribe();
    2333          346 :         let self_clone = Arc::clone(self);
    2334          346 : 
    2335          346 :         debug!("spawning flush loop");
    2336          346 :         *flush_loop_state = FlushLoopState::Running {
    2337          346 :             #[cfg(test)]
    2338          346 :             expect_initdb_optimization: false,
    2339          346 :             #[cfg(test)]
    2340          346 :             initdb_optimization_count: 0,
    2341          346 :         };
    2342          346 :         task_mgr::spawn(
    2343          346 :             task_mgr::BACKGROUND_RUNTIME.handle(),
    2344          346 :             task_mgr::TaskKind::LayerFlushTask,
    2345          346 :             Some(self.tenant_shard_id),
    2346          346 :             Some(self.timeline_id),
    2347          346 :             "layer flush task",
    2348              :             false,
    2349          346 :             async move {
    2350          346 :                 let _guard = guard;
    2351          346 :                 let background_ctx = RequestContext::todo_child(TaskKind::LayerFlushTask, DownloadBehavior::Error);
    2352        59255 :                 self_clone.flush_loop(layer_flush_start_rx, &background_ctx).await;
    2353            8 :                 let mut flush_loop_state = self_clone.flush_loop_state.lock().unwrap();
    2354            8 :                 assert!(matches!(*flush_loop_state, FlushLoopState::Running{ ..}));
    2355            8 :                 *flush_loop_state  = FlushLoopState::Exited;
    2356            8 :                 Ok(())
    2357            8 :             }
    2358          346 :             .instrument(info_span!(parent: None, "layer flush task", tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))
    2359              :         );
    2360          460 :     }
    2361              : 
    2362              :     /// Creates and starts the wal receiver.
    2363              :     ///
    2364              :     /// This function is expected to be called at most once per Timeline's lifecycle
    2365              :     /// when the timeline is activated.
    2366            0 :     fn launch_wal_receiver(
    2367            0 :         self: &Arc<Self>,
    2368            0 :         ctx: &RequestContext,
    2369            0 :         broker_client: BrokerClientChannel,
    2370            0 :     ) {
    2371            0 :         info!(
    2372            0 :             "launching WAL receiver for timeline {} of tenant {}",
    2373            0 :             self.timeline_id, self.tenant_shard_id
    2374              :         );
    2375              : 
    2376            0 :         let tenant_conf = self.tenant_conf.load();
    2377            0 :         let wal_connect_timeout = tenant_conf
    2378            0 :             .tenant_conf
    2379            0 :             .walreceiver_connect_timeout
    2380            0 :             .unwrap_or(self.conf.default_tenant_conf.walreceiver_connect_timeout);
    2381            0 :         let lagging_wal_timeout = tenant_conf
    2382            0 :             .tenant_conf
    2383            0 :             .lagging_wal_timeout
    2384            0 :             .unwrap_or(self.conf.default_tenant_conf.lagging_wal_timeout);
    2385            0 :         let max_lsn_wal_lag = tenant_conf
    2386            0 :             .tenant_conf
    2387            0 :             .max_lsn_wal_lag
    2388            0 :             .unwrap_or(self.conf.default_tenant_conf.max_lsn_wal_lag);
    2389            0 : 
    2390            0 :         let mut guard = self.walreceiver.lock().unwrap();
    2391            0 :         assert!(
    2392            0 :             guard.is_none(),
    2393            0 :             "multiple launches / re-launches of WAL receiver are not supported"
    2394              :         );
    2395            0 :         *guard = Some(WalReceiver::start(
    2396            0 :             Arc::clone(self),
    2397            0 :             WalReceiverConf {
    2398            0 :                 wal_connect_timeout,
    2399            0 :                 lagging_wal_timeout,
    2400            0 :                 max_lsn_wal_lag,
    2401            0 :                 auth_token: crate::config::SAFEKEEPER_AUTH_TOKEN.get().cloned(),
    2402            0 :                 availability_zone: self.conf.availability_zone.clone(),
    2403            0 :                 ingest_batch_size: self.conf.ingest_batch_size,
    2404            0 :             },
    2405            0 :             broker_client,
    2406            0 :             ctx,
    2407            0 :         ));
    2408            0 :     }
    2409              : 
    2410              :     /// Initialize with an empty layer map. Used when creating a new timeline.
    2411          346 :     pub(super) fn init_empty_layer_map(&self, start_lsn: Lsn) {
    2412          346 :         let mut layers = self.layers.try_write().expect(
    2413          346 :             "in the context where we call this function, no other task has access to the object",
    2414          346 :         );
    2415          346 :         layers.initialize_empty(Lsn(start_lsn.0));
    2416          346 :     }
    2417              : 
    2418              :     /// Scan the timeline directory, cleanup, populate the layer map, and schedule uploads for local-only
    2419              :     /// files.
    2420            6 :     pub(super) async fn load_layer_map(
    2421            6 :         &self,
    2422            6 :         disk_consistent_lsn: Lsn,
    2423            6 :         index_part: Option<IndexPart>,
    2424            6 :     ) -> anyhow::Result<()> {
    2425              :         use init::{Decision::*, Discovered, DismissedLayer};
    2426              :         use LayerName::*;
    2427              : 
    2428            6 :         let mut guard = self.layers.write().await;
    2429              : 
    2430            6 :         let timer = self.metrics.load_layer_map_histo.start_timer();
    2431            6 : 
    2432            6 :         // Scan timeline directory and create ImageLayerName and DeltaFilename
    2433            6 :         // structs representing all files on disk
    2434            6 :         let timeline_path = self
    2435            6 :             .conf
    2436            6 :             .timeline_path(&self.tenant_shard_id, &self.timeline_id);
    2437            6 :         let conf = self.conf;
    2438            6 :         let span = tracing::Span::current();
    2439            6 : 
    2440            6 :         // Copy to move into the task we're about to spawn
    2441            6 :         let generation = self.generation;
    2442            6 :         let shard = self.get_shard_index();
    2443            6 :         let this = self.myself.upgrade().expect("&self method holds the arc");
    2444              : 
    2445            6 :         let (loaded_layers, needs_cleanup, total_physical_size) = tokio::task::spawn_blocking({
    2446            6 :             move || {
    2447            6 :                 let _g = span.entered();
    2448            6 :                 let discovered = init::scan_timeline_dir(&timeline_path)?;
    2449            6 :                 let mut discovered_layers = Vec::with_capacity(discovered.len());
    2450            6 :                 let mut unrecognized_files = Vec::new();
    2451            6 : 
    2452            6 :                 let mut path = timeline_path;
    2453              : 
    2454           22 :                 for discovered in discovered {
    2455           16 :                     let (name, kind) = match discovered {
    2456           16 :                         Discovered::Layer(layer_file_name, local_path, file_size) => {
    2457           16 :                             discovered_layers.push((layer_file_name, local_path, file_size));
    2458           16 :                             continue;
    2459              :                         }
    2460              :                         Discovered::IgnoredBackup => {
    2461            0 :                             continue;
    2462              :                         }
    2463            0 :                         Discovered::Unknown(file_name) => {
    2464            0 :                             // we will later error if there are any
    2465            0 :                             unrecognized_files.push(file_name);
    2466            0 :                             continue;
    2467              :                         }
    2468            0 :                         Discovered::Ephemeral(name) => (name, "old ephemeral file"),
    2469            0 :                         Discovered::Temporary(name) => (name, "temporary timeline file"),
    2470            0 :                         Discovered::TemporaryDownload(name) => (name, "temporary download"),
    2471              :                     };
    2472            0 :                     path.push(Utf8Path::new(&name));
    2473            0 :                     init::cleanup(&path, kind)?;
    2474            0 :                     path.pop();
    2475              :                 }
    2476              : 
    2477            6 :                 if !unrecognized_files.is_empty() {
    2478              :                     // assume that if there are any there are many many.
    2479            0 :                     let n = unrecognized_files.len();
    2480            0 :                     let first = &unrecognized_files[..n.min(10)];
    2481            0 :                     anyhow::bail!(
    2482            0 :                         "unrecognized files in timeline dir (total {n}), first 10: {first:?}"
    2483            0 :                     );
    2484            6 :                 }
    2485            6 : 
    2486            6 :                 let decided = init::reconcile(
    2487            6 :                     discovered_layers,
    2488            6 :                     index_part.as_ref(),
    2489            6 :                     disk_consistent_lsn,
    2490            6 :                     generation,
    2491            6 :                     shard,
    2492            6 :                 );
    2493            6 : 
    2494            6 :                 let mut loaded_layers = Vec::new();
    2495            6 :                 let mut needs_cleanup = Vec::new();
    2496            6 :                 let mut total_physical_size = 0;
    2497              : 
    2498           22 :                 for (name, decision) in decided {
    2499           16 :                     let decision = match decision {
    2500            0 :                         Ok(UseRemote { local, remote }) => {
    2501            0 :                             // Remote is authoritative, but we may still choose to retain
    2502            0 :                             // the local file if the contents appear to match
    2503            0 :                             if local.metadata.file_size() == remote.file_size() {
    2504              :                                 // Use the local file, but take the remote metadata so that we pick up
    2505              :                                 // the correct generation.
    2506            0 :                                 UseLocal(LocalLayerFileMetadata {
    2507            0 :                                     metadata: remote,
    2508            0 :                                     local_path: local.local_path,
    2509            0 :                                 })
    2510              :                             } else {
    2511            0 :                                 init::cleanup_local_file_for_remote(&local, &remote)?;
    2512            0 :                                 UseRemote { local, remote }
    2513              :                             }
    2514              :                         }
    2515           16 :                         Ok(decision) => decision,
    2516            0 :                         Err(DismissedLayer::Future { local }) => {
    2517            0 :                             if let Some(local) = local {
    2518            0 :                                 init::cleanup_future_layer(
    2519            0 :                                     &local.local_path,
    2520            0 :                                     &name,
    2521            0 :                                     disk_consistent_lsn,
    2522            0 :                                 )?;
    2523            0 :                             }
    2524            0 :                             needs_cleanup.push(name);
    2525            0 :                             continue;
    2526              :                         }
    2527            0 :                         Err(DismissedLayer::LocalOnly(local)) => {
    2528            0 :                             init::cleanup_local_only_file(&name, &local)?;
    2529              :                             // this file never existed remotely, we will have to do rework
    2530            0 :                             continue;
    2531              :                         }
    2532              :                     };
    2533              : 
    2534           16 :                     match &name {
    2535           12 :                         Delta(d) => assert!(d.lsn_range.end <= disk_consistent_lsn + 1),
    2536            4 :                         Image(i) => assert!(i.lsn <= disk_consistent_lsn),
    2537              :                     }
    2538              : 
    2539           16 :                     tracing::debug!(layer=%name, ?decision, "applied");
    2540              : 
    2541           16 :                     let layer = match decision {
    2542           16 :                         UseLocal(local) => {
    2543           16 :                             total_physical_size += local.metadata.file_size();
    2544           16 :                             Layer::for_resident(conf, &this, local.local_path, name, local.metadata)
    2545           16 :                                 .drop_eviction_guard()
    2546              :                         }
    2547            0 :                         Evicted(remote) | UseRemote { remote, .. } => {
    2548            0 :                             Layer::for_evicted(conf, &this, name, remote)
    2549              :                         }
    2550              :                     };
    2551              : 
    2552           16 :                     loaded_layers.push(layer);
    2553              :                 }
    2554            6 :                 Ok((loaded_layers, needs_cleanup, total_physical_size))
    2555            6 :             }
    2556            6 :         })
    2557            5 :         .await
    2558            6 :         .map_err(anyhow::Error::new)
    2559            6 :         .and_then(|x| x)?;
    2560              : 
    2561            6 :         let num_layers = loaded_layers.len();
    2562            6 : 
    2563            6 :         guard.initialize_local_layers(loaded_layers, disk_consistent_lsn + 1);
    2564            6 : 
    2565            6 :         self.remote_client
    2566            6 :             .schedule_layer_file_deletion(&needs_cleanup)?;
    2567            6 :         self.remote_client
    2568            6 :             .schedule_index_upload_for_file_changes()?;
    2569              :         // This barrier orders above DELETEs before any later operations.
    2570              :         // This is critical because code executing after the barrier might
    2571              :         // create again objects with the same key that we just scheduled for deletion.
    2572              :         // For example, if we just scheduled deletion of an image layer "from the future",
    2573              :         // later compaction might run again and re-create the same image layer.
    2574              :         // "from the future" here means an image layer whose LSN is > IndexPart::disk_consistent_lsn.
    2575              :         // "same" here means same key range and LSN.
    2576              :         //
    2577              :         // Without a barrier between above DELETEs and the re-creation's PUTs,
    2578              :         // the upload queue may execute the PUT first, then the DELETE.
    2579              :         // In our example, we will end up with an IndexPart referencing a non-existent object.
    2580              :         //
    2581              :         // 1. a future image layer is created and uploaded
    2582              :         // 2. ps restart
    2583              :         // 3. the future layer from (1) is deleted during load layer map
    2584              :         // 4. image layer is re-created and uploaded
    2585              :         // 5. deletion queue would like to delete (1) but actually deletes (4)
    2586              :         // 6. delete by name works as expected, but it now deletes the wrong (later) version
    2587              :         //
    2588              :         // See https://github.com/neondatabase/neon/issues/5878
    2589              :         //
    2590              :         // NB: generation numbers naturally protect against this because they disambiguate
    2591              :         //     (1) and (4)
    2592            6 :         self.remote_client.schedule_barrier()?;
    2593              :         // Tenant::create_timeline will wait for these uploads to happen before returning, or
    2594              :         // on retry.
    2595              : 
    2596            6 :         info!(
    2597            0 :             "loaded layer map with {} layers at {}, total physical size: {}",
    2598              :             num_layers, disk_consistent_lsn, total_physical_size
    2599              :         );
    2600              : 
    2601            6 :         timer.stop_and_record();
    2602            6 :         Ok(())
    2603            6 :     }
    2604              : 
    2605              :     /// Retrieve current logical size of the timeline.
    2606              :     ///
    2607              :     /// The size could be lagging behind the actual number, in case
    2608              :     /// the initial size calculation has not been run (gets triggered on the first size access).
    2609              :     ///
    2610              :     /// return size and boolean flag that shows if the size is exact
    2611            0 :     pub(crate) fn get_current_logical_size(
    2612            0 :         self: &Arc<Self>,
    2613            0 :         priority: GetLogicalSizePriority,
    2614            0 :         ctx: &RequestContext,
    2615            0 :     ) -> logical_size::CurrentLogicalSize {
    2616            0 :         if !self.tenant_shard_id.is_shard_zero() {
    2617              :             // Logical size is only accurately maintained on shard zero: when called elsewhere, for example
    2618              :             // when HTTP API is serving a GET for timeline zero, return zero
    2619            0 :             return logical_size::CurrentLogicalSize::Approximate(logical_size::Approximate::zero());
    2620            0 :         }
    2621            0 : 
    2622            0 :         let current_size = self.current_logical_size.current_size();
    2623            0 :         debug!("Current size: {current_size:?}");
    2624              : 
    2625            0 :         match (current_size.accuracy(), priority) {
    2626            0 :             (logical_size::Accuracy::Exact, _) => (), // nothing to do
    2627            0 :             (logical_size::Accuracy::Approximate, GetLogicalSizePriority::Background) => {
    2628            0 :                 // background task will eventually deliver an exact value, we're in no rush
    2629            0 :             }
    2630              :             (logical_size::Accuracy::Approximate, GetLogicalSizePriority::User) => {
    2631              :                 // background task is not ready, but user is asking for it now;
    2632              :                 // => make the background task skip the line
    2633              :                 // (The alternative would be to calculate the size here, but,
    2634              :                 //  it can actually take a long time if the user has a lot of rels.
    2635              :                 //  And we'll inevitable need it again; So, let the background task do the work.)
    2636            0 :                 match self
    2637            0 :                     .current_logical_size
    2638            0 :                     .cancel_wait_for_background_loop_concurrency_limit_semaphore
    2639            0 :                     .get()
    2640              :                 {
    2641            0 :                     Some(cancel) => cancel.cancel(),
    2642              :                     None => {
    2643            0 :                         let state = self.current_state();
    2644            0 :                         if matches!(
    2645            0 :                             state,
    2646              :                             TimelineState::Broken { .. } | TimelineState::Stopping
    2647            0 :                         ) {
    2648            0 : 
    2649            0 :                             // Can happen when timeline detail endpoint is used when deletion is ongoing (or its broken).
    2650            0 :                             // Don't make noise.
    2651            0 :                         } else {
    2652            0 :                             warn!("unexpected: cancel_wait_for_background_loop_concurrency_limit_semaphore not set, priority-boosting of logical size calculation will not work");
    2653            0 :                             debug_assert!(false);
    2654              :                         }
    2655              :                     }
    2656              :                 };
    2657              :             }
    2658              :         }
    2659              : 
    2660            0 :         if let CurrentLogicalSize::Approximate(_) = &current_size {
    2661            0 :             if ctx.task_kind() == TaskKind::WalReceiverConnectionHandler {
    2662            0 :                 let first = self
    2663            0 :                     .current_logical_size
    2664            0 :                     .did_return_approximate_to_walreceiver
    2665            0 :                     .compare_exchange(
    2666            0 :                         false,
    2667            0 :                         true,
    2668            0 :                         AtomicOrdering::Relaxed,
    2669            0 :                         AtomicOrdering::Relaxed,
    2670            0 :                     )
    2671            0 :                     .is_ok();
    2672            0 :                 if first {
    2673            0 :                     crate::metrics::initial_logical_size::TIMELINES_WHERE_WALRECEIVER_GOT_APPROXIMATE_SIZE.inc();
    2674            0 :                 }
    2675            0 :             }
    2676            0 :         }
    2677              : 
    2678            0 :         current_size
    2679            0 :     }
    2680              : 
    2681            0 :     fn spawn_initial_logical_size_computation_task(self: &Arc<Self>, ctx: &RequestContext) {
    2682            0 :         let Some(initial_part_end) = self.current_logical_size.initial_part_end else {
    2683              :             // nothing to do for freshly created timelines;
    2684            0 :             assert_eq!(
    2685            0 :                 self.current_logical_size.current_size().accuracy(),
    2686            0 :                 logical_size::Accuracy::Exact,
    2687            0 :             );
    2688            0 :             self.current_logical_size.initialized.add_permits(1);
    2689            0 :             return;
    2690              :         };
    2691              : 
    2692            0 :         let cancel_wait_for_background_loop_concurrency_limit_semaphore = CancellationToken::new();
    2693            0 :         let token = cancel_wait_for_background_loop_concurrency_limit_semaphore.clone();
    2694            0 :         self.current_logical_size
    2695            0 :             .cancel_wait_for_background_loop_concurrency_limit_semaphore.set(token)
    2696            0 :             .expect("initial logical size calculation task must be spawned exactly once per Timeline object");
    2697            0 : 
    2698            0 :         let self_clone = Arc::clone(self);
    2699            0 :         let background_ctx = ctx.detached_child(
    2700            0 :             TaskKind::InitialLogicalSizeCalculation,
    2701            0 :             DownloadBehavior::Download,
    2702            0 :         );
    2703            0 :         task_mgr::spawn(
    2704            0 :             task_mgr::BACKGROUND_RUNTIME.handle(),
    2705            0 :             task_mgr::TaskKind::InitialLogicalSizeCalculation,
    2706            0 :             Some(self.tenant_shard_id),
    2707            0 :             Some(self.timeline_id),
    2708            0 :             "initial size calculation",
    2709              :             false,
    2710              :             // NB: don't log errors here, task_mgr will do that.
    2711            0 :             async move {
    2712            0 :                 let cancel = task_mgr::shutdown_token();
    2713            0 :                 self_clone
    2714            0 :                     .initial_logical_size_calculation_task(
    2715            0 :                         initial_part_end,
    2716            0 :                         cancel_wait_for_background_loop_concurrency_limit_semaphore,
    2717            0 :                         cancel,
    2718            0 :                         background_ctx,
    2719            0 :                     )
    2720            0 :                     .await;
    2721            0 :                 Ok(())
    2722            0 :             }
    2723            0 :             .instrument(info_span!(parent: None, "initial_size_calculation", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id)),
    2724              :         );
    2725            0 :     }
    2726              : 
    2727            0 :     async fn initial_logical_size_calculation_task(
    2728            0 :         self: Arc<Self>,
    2729            0 :         initial_part_end: Lsn,
    2730            0 :         skip_concurrency_limiter: CancellationToken,
    2731            0 :         cancel: CancellationToken,
    2732            0 :         background_ctx: RequestContext,
    2733            0 :     ) {
    2734              :         scopeguard::defer! {
    2735              :             // Irrespective of the outcome of this operation, we should unblock anyone waiting for it.
    2736              :             self.current_logical_size.initialized.add_permits(1);
    2737              :         }
    2738              : 
    2739              :         enum BackgroundCalculationError {
    2740              :             Cancelled,
    2741              :             Other(anyhow::Error),
    2742              :         }
    2743              : 
    2744            0 :         let try_once = |attempt: usize| {
    2745            0 :             let background_ctx = &background_ctx;
    2746            0 :             let self_ref = &self;
    2747            0 :             let skip_concurrency_limiter = &skip_concurrency_limiter;
    2748            0 :             async move {
    2749            0 :                 let cancel = task_mgr::shutdown_token();
    2750            0 :                 let wait_for_permit = super::tasks::concurrent_background_tasks_rate_limit_permit(
    2751            0 :                     BackgroundLoopKind::InitialLogicalSizeCalculation,
    2752            0 :                     background_ctx,
    2753            0 :                 );
    2754              : 
    2755              :                 use crate::metrics::initial_logical_size::StartCircumstances;
    2756            0 :                 let (_maybe_permit, circumstances) = tokio::select! {
    2757              :                     permit = wait_for_permit => {
    2758              :                         (Some(permit), StartCircumstances::AfterBackgroundTasksRateLimit)
    2759              :                     }
    2760              :                     _ = self_ref.cancel.cancelled() => {
    2761              :                         return Err(BackgroundCalculationError::Cancelled);
    2762              :                     }
    2763              :                     _ = cancel.cancelled() => {
    2764              :                         return Err(BackgroundCalculationError::Cancelled);
    2765              :                     },
    2766              :                     () = skip_concurrency_limiter.cancelled() => {
    2767              :                         // Some action that is part of a end user interaction requested logical size
    2768              :                         // => break out of the rate limit
    2769              :                         // TODO: ideally we'd not run on BackgroundRuntime but the requester's runtime;
    2770              :                         // but then again what happens if they cancel; also, we should just be using
    2771              :                         // one runtime across the entire process, so, let's leave this for now.
    2772              :                         (None, StartCircumstances::SkippedConcurrencyLimiter)
    2773              :                     }
    2774              :                 };
    2775              : 
    2776            0 :                 let metrics_guard = if attempt == 1 {
    2777            0 :                     crate::metrics::initial_logical_size::START_CALCULATION.first(circumstances)
    2778              :                 } else {
    2779            0 :                     crate::metrics::initial_logical_size::START_CALCULATION.retry(circumstances)
    2780              :                 };
    2781              : 
    2782            0 :                 match self_ref
    2783            0 :                     .logical_size_calculation_task(
    2784            0 :                         initial_part_end,
    2785            0 :                         LogicalSizeCalculationCause::Initial,
    2786            0 :                         background_ctx,
    2787            0 :                     )
    2788            0 :                     .await
    2789              :                 {
    2790            0 :                     Ok(calculated_size) => Ok((calculated_size, metrics_guard)),
    2791              :                     Err(CalculateLogicalSizeError::Cancelled) => {
    2792            0 :                         Err(BackgroundCalculationError::Cancelled)
    2793              :                     }
    2794            0 :                     Err(CalculateLogicalSizeError::Other(err)) => {
    2795            0 :                         if let Some(PageReconstructError::AncestorStopping(_)) =
    2796            0 :                             err.root_cause().downcast_ref()
    2797              :                         {
    2798            0 :                             Err(BackgroundCalculationError::Cancelled)
    2799              :                         } else {
    2800            0 :                             Err(BackgroundCalculationError::Other(err))
    2801              :                         }
    2802              :                     }
    2803              :                 }
    2804            0 :             }
    2805            0 :         };
    2806              : 
    2807            0 :         let retrying = async {
    2808            0 :             let mut attempt = 0;
    2809            0 :             loop {
    2810            0 :                 attempt += 1;
    2811            0 : 
    2812            0 :                 match try_once(attempt).await {
    2813            0 :                     Ok(res) => return ControlFlow::Continue(res),
    2814            0 :                     Err(BackgroundCalculationError::Cancelled) => return ControlFlow::Break(()),
    2815            0 :                     Err(BackgroundCalculationError::Other(e)) => {
    2816            0 :                         warn!(attempt, "initial size calculation failed: {e:?}");
    2817              :                         // exponential back-off doesn't make sense at these long intervals;
    2818              :                         // use fixed retry interval with generous jitter instead
    2819            0 :                         let sleep_duration = Duration::from_secs(
    2820            0 :                             u64::try_from(
    2821            0 :                                 // 1hour base
    2822            0 :                                 (60_i64 * 60_i64)
    2823            0 :                                     // 10min jitter
    2824            0 :                                     + rand::thread_rng().gen_range(-10 * 60..10 * 60),
    2825            0 :                             )
    2826            0 :                             .expect("10min < 1hour"),
    2827            0 :                         );
    2828            0 :                         tokio::time::sleep(sleep_duration).await;
    2829              :                     }
    2830              :                 }
    2831              :             }
    2832            0 :         };
    2833              : 
    2834            0 :         let (calculated_size, metrics_guard) = tokio::select! {
    2835              :             res = retrying  => {
    2836              :                 match res {
    2837              :                     ControlFlow::Continue(calculated_size) => calculated_size,
    2838              :                     ControlFlow::Break(()) => return,
    2839              :                 }
    2840              :             }
    2841              :             _ = cancel.cancelled() => {
    2842              :                 return;
    2843              :             }
    2844              :         };
    2845              : 
    2846              :         // we cannot query current_logical_size.current_size() to know the current
    2847              :         // *negative* value, only truncated to u64.
    2848            0 :         let added = self
    2849            0 :             .current_logical_size
    2850            0 :             .size_added_after_initial
    2851            0 :             .load(AtomicOrdering::Relaxed);
    2852            0 : 
    2853            0 :         let sum = calculated_size.saturating_add_signed(added);
    2854            0 : 
    2855            0 :         // set the gauge value before it can be set in `update_current_logical_size`.
    2856            0 :         self.metrics.current_logical_size_gauge.set(sum);
    2857            0 : 
    2858            0 :         self.current_logical_size
    2859            0 :             .initial_logical_size
    2860            0 :             .set((calculated_size, metrics_guard.calculation_result_saved()))
    2861            0 :             .ok()
    2862            0 :             .expect("only this task sets it");
    2863            0 :     }
    2864              : 
    2865            0 :     pub(crate) fn spawn_ondemand_logical_size_calculation(
    2866            0 :         self: &Arc<Self>,
    2867            0 :         lsn: Lsn,
    2868            0 :         cause: LogicalSizeCalculationCause,
    2869            0 :         ctx: RequestContext,
    2870            0 :     ) -> oneshot::Receiver<Result<u64, CalculateLogicalSizeError>> {
    2871            0 :         let (sender, receiver) = oneshot::channel();
    2872            0 :         let self_clone = Arc::clone(self);
    2873            0 :         // XXX if our caller loses interest, i.e., ctx is cancelled,
    2874            0 :         // we should stop the size calculation work and return an error.
    2875            0 :         // That would require restructuring this function's API to
    2876            0 :         // return the result directly, instead of a Receiver for the result.
    2877            0 :         let ctx = ctx.detached_child(
    2878            0 :             TaskKind::OndemandLogicalSizeCalculation,
    2879            0 :             DownloadBehavior::Download,
    2880            0 :         );
    2881            0 :         task_mgr::spawn(
    2882            0 :             task_mgr::BACKGROUND_RUNTIME.handle(),
    2883            0 :             task_mgr::TaskKind::OndemandLogicalSizeCalculation,
    2884            0 :             Some(self.tenant_shard_id),
    2885            0 :             Some(self.timeline_id),
    2886            0 :             "ondemand logical size calculation",
    2887            0 :             false,
    2888            0 :             async move {
    2889            0 :                 let res = self_clone
    2890            0 :                     .logical_size_calculation_task(lsn, cause, &ctx)
    2891            0 :                     .await;
    2892            0 :                 let _ = sender.send(res).ok();
    2893            0 :                 Ok(()) // Receiver is responsible for handling errors
    2894            0 :             }
    2895            0 :             .in_current_span(),
    2896            0 :         );
    2897            0 :         receiver
    2898            0 :     }
    2899              : 
    2900              :     /// # Cancel-Safety
    2901              :     ///
    2902              :     /// This method is cancellation-safe.
    2903            0 :     #[instrument(skip_all)]
    2904              :     async fn logical_size_calculation_task(
    2905              :         self: &Arc<Self>,
    2906              :         lsn: Lsn,
    2907              :         cause: LogicalSizeCalculationCause,
    2908              :         ctx: &RequestContext,
    2909              :     ) -> Result<u64, CalculateLogicalSizeError> {
    2910              :         crate::span::debug_assert_current_span_has_tenant_and_timeline_id();
    2911              :         // We should never be calculating logical sizes on shard !=0, because these shards do not have
    2912              :         // accurate relation sizes, and they do not emit consumption metrics.
    2913              :         debug_assert!(self.tenant_shard_id.is_shard_zero());
    2914              : 
    2915              :         let guard = self
    2916              :             .gate
    2917              :             .enter()
    2918            0 :             .map_err(|_| CalculateLogicalSizeError::Cancelled)?;
    2919              : 
    2920              :         let self_calculation = Arc::clone(self);
    2921              : 
    2922            0 :         let mut calculation = pin!(async {
    2923            0 :             let ctx = ctx.attached_child();
    2924            0 :             self_calculation
    2925            0 :                 .calculate_logical_size(lsn, cause, &guard, &ctx)
    2926            0 :                 .await
    2927            0 :         });
    2928              : 
    2929              :         tokio::select! {
    2930              :             res = &mut calculation => { res }
    2931              :             _ = self.cancel.cancelled() => {
    2932              :                 debug!("cancelling logical size calculation for timeline shutdown");
    2933              :                 calculation.await
    2934              :             }
    2935              :         }
    2936              :     }
    2937              : 
    2938              :     /// Calculate the logical size of the database at the latest LSN.
    2939              :     ///
    2940              :     /// NOTE: counted incrementally, includes ancestors. This can be a slow operation,
    2941              :     /// especially if we need to download remote layers.
    2942              :     ///
    2943              :     /// # Cancel-Safety
    2944              :     ///
    2945              :     /// This method is cancellation-safe.
    2946            0 :     async fn calculate_logical_size(
    2947            0 :         &self,
    2948            0 :         up_to_lsn: Lsn,
    2949            0 :         cause: LogicalSizeCalculationCause,
    2950            0 :         _guard: &GateGuard,
    2951            0 :         ctx: &RequestContext,
    2952            0 :     ) -> Result<u64, CalculateLogicalSizeError> {
    2953            0 :         info!(
    2954            0 :             "Calculating logical size for timeline {} at {}",
    2955              :             self.timeline_id, up_to_lsn
    2956              :         );
    2957              : 
    2958              :         pausable_failpoint!("timeline-calculate-logical-size-pause");
    2959              : 
    2960              :         // See if we've already done the work for initial size calculation.
    2961              :         // This is a short-cut for timelines that are mostly unused.
    2962            0 :         if let Some(size) = self.current_logical_size.initialized_size(up_to_lsn) {
    2963            0 :             return Ok(size);
    2964            0 :         }
    2965            0 :         let storage_time_metrics = match cause {
    2966              :             LogicalSizeCalculationCause::Initial
    2967              :             | LogicalSizeCalculationCause::ConsumptionMetricsSyntheticSize
    2968            0 :             | LogicalSizeCalculationCause::TenantSizeHandler => &self.metrics.logical_size_histo,
    2969              :             LogicalSizeCalculationCause::EvictionTaskImitation => {
    2970            0 :                 &self.metrics.imitate_logical_size_histo
    2971              :             }
    2972              :         };
    2973            0 :         let timer = storage_time_metrics.start_timer();
    2974            0 :         let logical_size = self
    2975            0 :             .get_current_logical_size_non_incremental(up_to_lsn, ctx)
    2976            0 :             .await?;
    2977            0 :         debug!("calculated logical size: {logical_size}");
    2978            0 :         timer.stop_and_record();
    2979            0 :         Ok(logical_size)
    2980            0 :     }
    2981              : 
    2982              :     /// Update current logical size, adding `delta' to the old value.
    2983       270570 :     fn update_current_logical_size(&self, delta: i64) {
    2984       270570 :         let logical_size = &self.current_logical_size;
    2985       270570 :         logical_size.increment_size(delta);
    2986       270570 : 
    2987       270570 :         // Also set the value in the prometheus gauge. Note that
    2988       270570 :         // there is a race condition here: if this is is called by two
    2989       270570 :         // threads concurrently, the prometheus gauge might be set to
    2990       270570 :         // one value while current_logical_size is set to the
    2991       270570 :         // other.
    2992       270570 :         match logical_size.current_size() {
    2993       270570 :             CurrentLogicalSize::Exact(ref new_current_size) => self
    2994       270570 :                 .metrics
    2995       270570 :                 .current_logical_size_gauge
    2996       270570 :                 .set(new_current_size.into()),
    2997            0 :             CurrentLogicalSize::Approximate(_) => {
    2998            0 :                 // don't update the gauge yet, this allows us not to update the gauge back and
    2999            0 :                 // forth between the initial size calculation task.
    3000            0 :             }
    3001              :         }
    3002       270570 :     }
    3003              : 
    3004         2674 :     pub(crate) fn update_directory_entries_count(&self, kind: DirectoryKind, count: u64) {
    3005         2674 :         self.directory_metrics[kind.offset()].store(count, AtomicOrdering::Relaxed);
    3006         2674 :         let aux_metric =
    3007         2674 :             self.directory_metrics[DirectoryKind::AuxFiles.offset()].load(AtomicOrdering::Relaxed);
    3008         2674 : 
    3009         2674 :         let sum_of_entries = self
    3010         2674 :             .directory_metrics
    3011         2674 :             .iter()
    3012        18718 :             .map(|v| v.load(AtomicOrdering::Relaxed))
    3013         2674 :             .sum();
    3014         2674 :         // Set a high general threshold and a lower threshold for the auxiliary files,
    3015         2674 :         // as we can have large numbers of relations in the db directory.
    3016         2674 :         const SUM_THRESHOLD: u64 = 5000;
    3017         2674 :         const AUX_THRESHOLD: u64 = 1000;
    3018         2674 :         if sum_of_entries >= SUM_THRESHOLD || aux_metric >= AUX_THRESHOLD {
    3019            0 :             self.metrics
    3020            0 :                 .directory_entries_count_gauge
    3021            0 :                 .set(sum_of_entries);
    3022         2674 :         } else if let Some(metric) = Lazy::get(&self.metrics.directory_entries_count_gauge) {
    3023            0 :             metric.set(sum_of_entries);
    3024         2674 :         }
    3025         2674 :     }
    3026              : 
    3027            0 :     async fn find_layer(&self, layer_name: &LayerName) -> Option<Layer> {
    3028            0 :         let guard = self.layers.read().await;
    3029            0 :         for historic_layer in guard.layer_map().iter_historic_layers() {
    3030            0 :             let historic_layer_name = historic_layer.layer_name();
    3031            0 :             if layer_name == &historic_layer_name {
    3032            0 :                 return Some(guard.get_from_desc(&historic_layer));
    3033            0 :             }
    3034              :         }
    3035              : 
    3036            0 :         None
    3037            0 :     }
    3038              : 
    3039              :     /// The timeline heatmap is a hint to secondary locations from the primary location,
    3040              :     /// indicating which layers are currently on-disk on the primary.
    3041              :     ///
    3042              :     /// None is returned if the Timeline is in a state where uploading a heatmap
    3043              :     /// doesn't make sense, such as shutting down or initializing.  The caller
    3044              :     /// should treat this as a cue to simply skip doing any heatmap uploading
    3045              :     /// for this timeline.
    3046            0 :     pub(crate) async fn generate_heatmap(&self) -> Option<HeatMapTimeline> {
    3047            0 :         if !self.is_active() {
    3048            0 :             return None;
    3049            0 :         }
    3050              : 
    3051            0 :         let guard = self.layers.read().await;
    3052              : 
    3053            0 :         let resident = guard.likely_resident_layers().map(|layer| {
    3054            0 :             let last_activity_ts = layer.access_stats().latest_activity_or_now();
    3055            0 : 
    3056            0 :             HeatMapLayer::new(
    3057            0 :                 layer.layer_desc().layer_name(),
    3058            0 :                 (&layer.metadata()).into(),
    3059            0 :                 last_activity_ts,
    3060            0 :             )
    3061            0 :         });
    3062            0 : 
    3063            0 :         let layers = resident.collect();
    3064            0 : 
    3065            0 :         Some(HeatMapTimeline::new(self.timeline_id, layers))
    3066            0 :     }
    3067              : 
    3068              :     /// Returns true if the given lsn is or was an ancestor branchpoint.
    3069            0 :     pub(crate) fn is_ancestor_lsn(&self, lsn: Lsn) -> bool {
    3070            0 :         // upon timeline detach, we set the ancestor_lsn to Lsn::INVALID and the store the original
    3071            0 :         // branchpoint in the value in IndexPart::lineage
    3072            0 :         self.ancestor_lsn == lsn
    3073            0 :             || (self.ancestor_lsn == Lsn::INVALID
    3074            0 :                 && self.remote_client.is_previous_ancestor_lsn(lsn))
    3075            0 :     }
    3076              : }
    3077              : 
    3078              : type TraversalId = Arc<str>;
    3079              : 
    3080              : trait TraversalLayerExt {
    3081              :     fn traversal_id(&self) -> TraversalId;
    3082              : }
    3083              : 
    3084              : impl TraversalLayerExt for Layer {
    3085       210377 :     fn traversal_id(&self) -> TraversalId {
    3086       210377 :         Arc::clone(self.debug_str())
    3087       210377 :     }
    3088              : }
    3089              : 
    3090              : impl TraversalLayerExt for Arc<InMemoryLayer> {
    3091       605871 :     fn traversal_id(&self) -> TraversalId {
    3092       605871 :         Arc::clone(self.local_path_str())
    3093       605871 :     }
    3094              : }
    3095              : 
    3096              : impl Timeline {
    3097              :     ///
    3098              :     /// Get a handle to a Layer for reading.
    3099              :     ///
    3100              :     /// The returned Layer might be from an ancestor timeline, if the
    3101              :     /// segment hasn't been updated on this timeline yet.
    3102              :     ///
    3103              :     /// This function takes the current timeline's locked LayerMap as an argument,
    3104              :     /// so callers can avoid potential race conditions.
    3105              :     ///
    3106              :     /// # Cancel-Safety
    3107              :     ///
    3108              :     /// This method is cancellation-safe.
    3109       625369 :     async fn get_reconstruct_data(
    3110       625369 :         &self,
    3111       625369 :         key: Key,
    3112       625369 :         request_lsn: Lsn,
    3113       625369 :         reconstruct_state: &mut ValueReconstructState,
    3114       625369 :         ctx: &RequestContext,
    3115       625369 :     ) -> Result<Vec<TraversalPathItem>, PageReconstructError> {
    3116       625369 :         // Start from the current timeline.
    3117       625369 :         let mut timeline_owned;
    3118       625369 :         let mut timeline = self;
    3119       625369 : 
    3120       625369 :         let mut read_count = scopeguard::guard(0, |cnt| {
    3121       625369 :             crate::metrics::READ_NUM_LAYERS_VISITED.observe(cnt as f64)
    3122       625369 :         });
    3123       625369 : 
    3124       625369 :         // For debugging purposes, collect the path of layers that we traversed
    3125       625369 :         // through. It's included in the error message if we fail to find the key.
    3126       625369 :         let mut traversal_path = Vec::<TraversalPathItem>::new();
    3127              : 
    3128       625369 :         let cached_lsn = if let Some((cached_lsn, _)) = &reconstruct_state.img {
    3129            0 :             *cached_lsn
    3130              :         } else {
    3131       625369 :             Lsn(0)
    3132              :         };
    3133              : 
    3134              :         // 'prev_lsn' tracks the last LSN that we were at in our search. It's used
    3135              :         // to check that each iteration make some progress, to break infinite
    3136              :         // looping if something goes wrong.
    3137       625369 :         let mut prev_lsn = None;
    3138       625369 : 
    3139       625369 :         let mut result = ValueReconstructResult::Continue;
    3140       625369 :         let mut cont_lsn = Lsn(request_lsn.0 + 1);
    3141              : 
    3142      1669145 :         'outer: loop {
    3143      1669145 :             if self.cancel.is_cancelled() {
    3144            0 :                 return Err(PageReconstructError::Cancelled);
    3145      1669145 :             }
    3146      1669145 : 
    3147      1669145 :             // The function should have updated 'state'
    3148      1669145 :             //info!("CALLED for {} at {}: {:?} with {} records, cached {}", key, cont_lsn, result, reconstruct_state.records.len(), cached_lsn);
    3149      1669145 :             match result {
    3150       625245 :                 ValueReconstructResult::Complete => return Ok(traversal_path),
    3151              :                 ValueReconstructResult::Continue => {
    3152              :                     // If we reached an earlier cached page image, we're done.
    3153      1043886 :                     if cont_lsn == cached_lsn + 1 {
    3154            0 :                         MATERIALIZED_PAGE_CACHE_HIT.inc_by(1);
    3155            0 :                         return Ok(traversal_path);
    3156      1043886 :                     }
    3157      1043886 :                     if let Some(prev) = prev_lsn {
    3158       191107 :                         if prev <= cont_lsn {
    3159              :                             // Didn't make any progress in last iteration. Error out to avoid
    3160              :                             // getting stuck in the loop.
    3161          108 :                             return Err(PageReconstructError::MissingKey(MissingKeyError {
    3162          108 :                                 key,
    3163          108 :                                 shard: self.shard_identity.get_shard_number(&key),
    3164          108 :                                 cont_lsn: Lsn(cont_lsn.0 - 1),
    3165          108 :                                 request_lsn,
    3166          108 :                                 ancestor_lsn: Some(timeline.ancestor_lsn),
    3167          108 :                                 traversal_path,
    3168          108 :                                 backtrace: None,
    3169          108 :                             }));
    3170       190999 :                         }
    3171       852779 :                     }
    3172      1043778 :                     prev_lsn = Some(cont_lsn);
    3173              :                 }
    3174              :                 ValueReconstructResult::Missing => {
    3175              :                     return Err(PageReconstructError::MissingKey(MissingKeyError {
    3176           14 :                         key,
    3177           14 :                         shard: self.shard_identity.get_shard_number(&key),
    3178           14 :                         cont_lsn,
    3179           14 :                         request_lsn,
    3180           14 :                         ancestor_lsn: None,
    3181           14 :                         traversal_path,
    3182           14 :                         backtrace: if cfg!(test) {
    3183           14 :                             Some(std::backtrace::Backtrace::force_capture())
    3184              :                         } else {
    3185            0 :                             None
    3186              :                         },
    3187              :                     }));
    3188              :                 }
    3189              :             }
    3190              : 
    3191              :             // Recurse into ancestor if needed
    3192      1043778 :             if is_inherited_key(key) && Lsn(cont_lsn.0 - 1) <= timeline.ancestor_lsn {
    3193       227412 :                 trace!(
    3194            0 :                     "going into ancestor {}, cont_lsn is {}",
    3195              :                     timeline.ancestor_lsn,
    3196              :                     cont_lsn
    3197              :                 );
    3198              : 
    3199       227412 :                 timeline_owned = timeline.get_ready_ancestor_timeline(ctx).await?;
    3200       227410 :                 timeline = &*timeline_owned;
    3201       227410 :                 prev_lsn = None;
    3202       227410 :                 continue 'outer;
    3203       816366 :             }
    3204              : 
    3205       816366 :             let guard = timeline.layers.read().await;
    3206       816366 :             let layers = guard.layer_map();
    3207              : 
    3208              :             // Check the open and frozen in-memory layers first, in order from newest
    3209              :             // to oldest.
    3210       816366 :             if let Some(open_layer) = &layers.open_layer {
    3211       717384 :                 let start_lsn = open_layer.get_lsn_range().start;
    3212       717384 :                 if cont_lsn > start_lsn {
    3213              :                     //info!("CHECKING for {} at {} on open layer {}", key, cont_lsn, open_layer.layer_name().display());
    3214              :                     // Get all the data needed to reconstruct the page version from this layer.
    3215              :                     // But if we have an older cached page image, no need to go past that.
    3216       604275 :                     let lsn_floor = max(cached_lsn + 1, start_lsn);
    3217       604275 : 
    3218       604275 :                     let open_layer = open_layer.clone();
    3219       604275 :                     drop(guard);
    3220       604275 : 
    3221       604275 :                     result = match open_layer
    3222       604275 :                         .get_value_reconstruct_data(
    3223       604275 :                             key,
    3224       604275 :                             lsn_floor..cont_lsn,
    3225       604275 :                             reconstruct_state,
    3226       604275 :                             ctx,
    3227       604275 :                         )
    3228        10491 :                         .await
    3229              :                     {
    3230       604275 :                         Ok(result) => result,
    3231            0 :                         Err(e) => return Err(PageReconstructError::from(e)),
    3232              :                     };
    3233       604275 :                     cont_lsn = lsn_floor;
    3234       604275 :                     *read_count += 1;
    3235       604275 :                     traversal_path.push((result, cont_lsn, open_layer.traversal_id()));
    3236       604275 :                     continue 'outer;
    3237       113109 :                 }
    3238        98982 :             }
    3239       212091 :             for frozen_layer in layers.frozen_layers.iter().rev() {
    3240         1596 :                 let start_lsn = frozen_layer.get_lsn_range().start;
    3241         1596 :                 if cont_lsn > start_lsn {
    3242              :                     //info!("CHECKING for {} at {} on frozen layer {}", key, cont_lsn, frozen_layer.layer_name().display());
    3243         1596 :                     let lsn_floor = max(cached_lsn + 1, start_lsn);
    3244         1596 : 
    3245         1596 :                     let frozen_layer = frozen_layer.clone();
    3246         1596 :                     drop(guard);
    3247         1596 : 
    3248         1596 :                     result = match frozen_layer
    3249         1596 :                         .get_value_reconstruct_data(
    3250         1596 :                             key,
    3251         1596 :                             lsn_floor..cont_lsn,
    3252         1596 :                             reconstruct_state,
    3253         1596 :                             ctx,
    3254         1596 :                         )
    3255            0 :                         .await
    3256              :                     {
    3257         1596 :                         Ok(result) => result,
    3258            0 :                         Err(e) => return Err(PageReconstructError::from(e)),
    3259              :                     };
    3260         1596 :                     cont_lsn = lsn_floor;
    3261         1596 :                     *read_count += 1;
    3262         1596 :                     traversal_path.push((result, cont_lsn, frozen_layer.traversal_id()));
    3263         1596 :                     continue 'outer;
    3264            0 :                 }
    3265              :             }
    3266              : 
    3267       210495 :             if let Some(SearchResult { lsn_floor, layer }) = layers.search(key, cont_lsn) {
    3268       210377 :                 let layer = guard.get_from_desc(&layer);
    3269       210377 :                 drop(guard);
    3270       210377 :                 // Get all the data needed to reconstruct the page version from this layer.
    3271       210377 :                 // But if we have an older cached page image, no need to go past that.
    3272       210377 :                 let lsn_floor = max(cached_lsn + 1, lsn_floor);
    3273       210377 :                 result = match layer
    3274       210377 :                     .get_value_reconstruct_data(key, lsn_floor..cont_lsn, reconstruct_state, ctx)
    3275        29695 :                     .await
    3276              :                 {
    3277       210377 :                     Ok(result) => result,
    3278            0 :                     Err(e) => return Err(PageReconstructError::from(e)),
    3279              :                 };
    3280       210377 :                 cont_lsn = lsn_floor;
    3281       210377 :                 *read_count += 1;
    3282       210377 :                 traversal_path.push((result, cont_lsn, layer.traversal_id()));
    3283       210377 :                 continue 'outer;
    3284          118 :             } else if timeline.ancestor_timeline.is_some() {
    3285              :                 // Nothing on this timeline. Traverse to parent
    3286          108 :                 result = ValueReconstructResult::Continue;
    3287          108 :                 cont_lsn = Lsn(timeline.ancestor_lsn.0 + 1);
    3288          108 :                 continue 'outer;
    3289              :             } else {
    3290              :                 // Nothing found
    3291           10 :                 result = ValueReconstructResult::Missing;
    3292           10 :                 continue 'outer;
    3293              :             }
    3294              :         }
    3295       625369 :     }
    3296              : 
    3297              :     /// Get the data needed to reconstruct all keys in the provided keyspace
    3298              :     ///
    3299              :     /// The algorithm is as follows:
    3300              :     /// 1.   While some keys are still not done and there's a timeline to visit:
    3301              :     /// 2.   Visit the timeline (see [`Timeline::get_vectored_reconstruct_data_timeline`]:
    3302              :     /// 2.1: Build the fringe for the current keyspace
    3303              :     /// 2.2  Visit the newest layer from the fringe to collect all values for the range it
    3304              :     ///      intersects
    3305              :     /// 2.3. Pop the timeline from the fringe
    3306              :     /// 2.4. If the fringe is empty, go back to 1
    3307          470 :     async fn get_vectored_reconstruct_data(
    3308          470 :         &self,
    3309          470 :         mut keyspace: KeySpace,
    3310          470 :         request_lsn: Lsn,
    3311          470 :         reconstruct_state: &mut ValuesReconstructState,
    3312          470 :         ctx: &RequestContext,
    3313          470 :     ) -> Result<(), GetVectoredError> {
    3314          470 :         let mut timeline_owned: Arc<Timeline>;
    3315          470 :         let mut timeline = self;
    3316          470 : 
    3317          470 :         let mut cont_lsn = Lsn(request_lsn.0 + 1);
    3318              : 
    3319          470 :         let missing_keyspace = loop {
    3320          504 :             if self.cancel.is_cancelled() {
    3321            0 :                 return Err(GetVectoredError::Cancelled);
    3322          504 :             }
    3323              : 
    3324              :             let TimelineVisitOutcome {
    3325          504 :                 completed_keyspace: completed,
    3326          504 :                 image_covered_keyspace,
    3327          504 :             } = Self::get_vectored_reconstruct_data_timeline(
    3328          504 :                 timeline,
    3329          504 :                 keyspace.clone(),
    3330          504 :                 cont_lsn,
    3331          504 :                 reconstruct_state,
    3332          504 :                 &self.cancel,
    3333          504 :                 ctx,
    3334          504 :             )
    3335        11519 :             .await?;
    3336              : 
    3337          504 :             keyspace.remove_overlapping_with(&completed);
    3338          504 : 
    3339          504 :             // Do not descend into the ancestor timeline for aux files.
    3340          504 :             // We don't return a blanket [`GetVectoredError::MissingKey`] to avoid
    3341          504 :             // stalling compaction.
    3342          504 :             keyspace.remove_overlapping_with(&KeySpace {
    3343          504 :                 ranges: vec![NON_INHERITED_RANGE, NON_INHERITED_SPARSE_RANGE],
    3344          504 :             });
    3345          504 : 
    3346          504 :             // Keyspace is fully retrieved
    3347          504 :             if keyspace.is_empty() {
    3348          460 :                 break None;
    3349           44 :             }
    3350           44 : 
    3351           44 :             // Not fully retrieved but no ancestor timeline.
    3352           44 :             if timeline.ancestor_timeline.is_none() {
    3353           10 :                 break Some(keyspace);
    3354           34 :             }
    3355           34 : 
    3356           34 :             // Now we see if there are keys covered by the image layer but does not exist in the
    3357           34 :             // image layer, which means that the key does not exist.
    3358           34 : 
    3359           34 :             // The block below will stop the vectored search if any of the keys encountered an image layer
    3360           34 :             // which did not contain a snapshot for said key. Since we have already removed all completed
    3361           34 :             // keys from `keyspace`, we expect there to be no overlap between it and the image covered key
    3362           34 :             // space. If that's not the case, we had at least one key encounter a gap in the image layer
    3363           34 :             // and stop the search as a result of that.
    3364           34 :             let removed = keyspace.remove_overlapping_with(&image_covered_keyspace);
    3365           34 :             if !removed.is_empty() {
    3366            0 :                 break Some(removed);
    3367           34 :             }
    3368           34 :             // If we reached this point, `remove_overlapping_with` should not have made any change to the
    3369           34 :             // keyspace.
    3370           34 : 
    3371           34 :             // Take the min to avoid reconstructing a page with data newer than request Lsn.
    3372           34 :             cont_lsn = std::cmp::min(Lsn(request_lsn.0 + 1), Lsn(timeline.ancestor_lsn.0 + 1));
    3373           34 :             timeline_owned = timeline
    3374           34 :                 .get_ready_ancestor_timeline(ctx)
    3375            0 :                 .await
    3376           34 :                 .map_err(GetVectoredError::GetReadyAncestorError)?;
    3377           34 :             timeline = &*timeline_owned;
    3378              :         };
    3379              : 
    3380          470 :         if let Some(missing_keyspace) = missing_keyspace {
    3381           10 :             return Err(GetVectoredError::MissingKey(MissingKeyError {
    3382           10 :                 key: missing_keyspace.start().unwrap(), /* better if we can store the full keyspace */
    3383           10 :                 shard: self
    3384           10 :                     .shard_identity
    3385           10 :                     .get_shard_number(&missing_keyspace.start().unwrap()),
    3386           10 :                 cont_lsn,
    3387           10 :                 request_lsn,
    3388           10 :                 ancestor_lsn: Some(timeline.ancestor_lsn),
    3389           10 :                 traversal_path: vec![],
    3390           10 :                 backtrace: None,
    3391           10 :             }));
    3392          460 :         }
    3393          460 : 
    3394          460 :         Ok(())
    3395          470 :     }
    3396              : 
    3397              :     /// Collect the reconstruct data for a keyspace from the specified timeline.
    3398              :     ///
    3399              :     /// Maintain a fringe [`LayerFringe`] which tracks all the layers that intersect
    3400              :     /// the current keyspace. The current keyspace of the search at any given timeline
    3401              :     /// is the original keyspace minus all the keys that have been completed minus
    3402              :     /// any keys for which we couldn't find an intersecting layer. It's not tracked explicitly,
    3403              :     /// but if you merge all the keyspaces in the fringe, you get the "current keyspace".
    3404              :     ///
    3405              :     /// This is basically a depth-first search visitor implementation where a vertex
    3406              :     /// is the (layer, lsn range, key space) tuple. The fringe acts as the stack.
    3407              :     ///
    3408              :     /// At each iteration pop the top of the fringe (the layer with the highest Lsn)
    3409              :     /// and get all the required reconstruct data from the layer in one go.
    3410              :     ///
    3411              :     /// Returns the completed keyspace and the keyspaces with image coverage. The caller
    3412              :     /// decides how to deal with these two keyspaces.
    3413          504 :     async fn get_vectored_reconstruct_data_timeline(
    3414          504 :         timeline: &Timeline,
    3415          504 :         keyspace: KeySpace,
    3416          504 :         mut cont_lsn: Lsn,
    3417          504 :         reconstruct_state: &mut ValuesReconstructState,
    3418          504 :         cancel: &CancellationToken,
    3419          504 :         ctx: &RequestContext,
    3420          504 :     ) -> Result<TimelineVisitOutcome, GetVectoredError> {
    3421          504 :         let mut unmapped_keyspace = keyspace.clone();
    3422          504 :         let mut fringe = LayerFringe::new();
    3423          504 : 
    3424          504 :         let mut completed_keyspace = KeySpace::default();
    3425          504 :         let mut image_covered_keyspace = KeySpaceRandomAccum::new();
    3426              : 
    3427          868 :         loop {
    3428          868 :             if cancel.is_cancelled() {
    3429            0 :                 return Err(GetVectoredError::Cancelled);
    3430          868 :             }
    3431          868 : 
    3432          868 :             let (keys_done_last_step, keys_with_image_coverage) =
    3433          868 :                 reconstruct_state.consume_done_keys();
    3434          868 :             unmapped_keyspace.remove_overlapping_with(&keys_done_last_step);
    3435          868 :             completed_keyspace.merge(&keys_done_last_step);
    3436          868 :             if let Some(keys_with_image_coverage) = keys_with_image_coverage {
    3437          102 :                 unmapped_keyspace
    3438          102 :                     .remove_overlapping_with(&KeySpace::single(keys_with_image_coverage.clone()));
    3439          102 :                 image_covered_keyspace.add_range(keys_with_image_coverage);
    3440          766 :             }
    3441              : 
    3442              :             // Do not descent any further if the last layer we visited
    3443              :             // completed all keys in the keyspace it inspected. This is not
    3444              :             // required for correctness, but avoids visiting extra layers
    3445              :             // which turns out to be a perf bottleneck in some cases.
    3446          868 :             if !unmapped_keyspace.is_empty() {
    3447          722 :                 let guard = timeline.layers.read().await;
    3448          722 :                 let layers = guard.layer_map();
    3449          722 : 
    3450          722 :                 let in_memory_layer = layers.find_in_memory_layer(|l| {
    3451           20 :                     let start_lsn = l.get_lsn_range().start;
    3452           20 :                     cont_lsn > start_lsn
    3453          722 :                 });
    3454          722 : 
    3455          722 :                 match in_memory_layer {
    3456           10 :                     Some(l) => {
    3457           10 :                         let lsn_range = l.get_lsn_range().start..cont_lsn;
    3458           10 :                         fringe.update(
    3459           10 :                             ReadableLayer::InMemoryLayer(l),
    3460           10 :                             unmapped_keyspace.clone(),
    3461           10 :                             lsn_range,
    3462           10 :                         );
    3463           10 :                     }
    3464              :                     None => {
    3465        72508 :                         for range in unmapped_keyspace.ranges.iter() {
    3466        72508 :                             let results = layers.range_search(range.clone(), cont_lsn);
    3467        72508 : 
    3468        72508 :                             results
    3469        72508 :                                 .found
    3470        72508 :                                 .into_iter()
    3471        72508 :                                 .map(|(SearchResult { layer, lsn_floor }, keyspace_accum)| {
    3472        64130 :                                     (
    3473        64130 :                                         ReadableLayer::PersistentLayer(guard.get_from_desc(&layer)),
    3474        64130 :                                         keyspace_accum.to_keyspace(),
    3475        64130 :                                         lsn_floor..cont_lsn,
    3476        64130 :                                     )
    3477        72508 :                                 })
    3478        72508 :                                 .for_each(|(layer, keyspace, lsn_range)| {
    3479        64130 :                                     fringe.update(layer, keyspace, lsn_range)
    3480        72508 :                                 });
    3481        72508 :                         }
    3482              :                     }
    3483              :                 }
    3484              : 
    3485              :                 // It's safe to drop the layer map lock after planning the next round of reads.
    3486              :                 // The fringe keeps readable handles for the layers which are safe to read even
    3487              :                 // if layers were compacted or flushed.
    3488              :                 //
    3489              :                 // The more interesting consideration is: "Why is the read algorithm still correct
    3490              :                 // if the layer map changes while it is operating?". Doing a vectored read on a
    3491              :                 // timeline boils down to pushing an imaginary lsn boundary downwards for each range
    3492              :                 // covered by the read. The layer map tells us how to move the lsn downwards for a
    3493              :                 // range at *a particular point in time*. It is fine for the answer to be different
    3494              :                 // at two different time points.
    3495          722 :                 drop(guard);
    3496          146 :             }
    3497              : 
    3498          868 :             if let Some((layer_to_read, keyspace_to_read, lsn_range)) = fringe.next_layer() {
    3499          364 :                 let next_cont_lsn = lsn_range.start;
    3500          364 :                 layer_to_read
    3501          364 :                     .get_values_reconstruct_data(
    3502          364 :                         keyspace_to_read.clone(),
    3503          364 :                         lsn_range,
    3504          364 :                         reconstruct_state,
    3505          364 :                         ctx,
    3506          364 :                     )
    3507        11516 :                     .await?;
    3508              : 
    3509          364 :                 unmapped_keyspace = keyspace_to_read;
    3510          364 :                 cont_lsn = next_cont_lsn;
    3511          364 : 
    3512          364 :                 reconstruct_state.on_layer_visited(&layer_to_read);
    3513              :             } else {
    3514          504 :                 break;
    3515          504 :             }
    3516          504 :         }
    3517          504 : 
    3518          504 :         Ok(TimelineVisitOutcome {
    3519          504 :             completed_keyspace,
    3520          504 :             image_covered_keyspace: image_covered_keyspace.consume_keyspace(),
    3521          504 :         })
    3522          504 :     }
    3523              : 
    3524              :     /// # Cancel-safety
    3525              :     ///
    3526              :     /// This method is cancellation-safe.
    3527       624021 :     async fn lookup_cached_page(
    3528       624021 :         &self,
    3529       624021 :         key: &Key,
    3530       624021 :         lsn: Lsn,
    3531       624021 :         ctx: &RequestContext,
    3532       624021 :     ) -> Option<(Lsn, Bytes)> {
    3533       624021 :         let cache = page_cache::get();
    3534              : 
    3535              :         // FIXME: It's pointless to check the cache for things that are not 8kB pages.
    3536              :         // We should look at the key to determine if it's a cacheable object
    3537       624021 :         let (lsn, read_guard) = cache
    3538       624021 :             .lookup_materialized_page(self.tenant_shard_id, self.timeline_id, key, lsn, ctx)
    3539       624021 :             .await?;
    3540            0 :         let img = Bytes::from(read_guard.to_vec());
    3541            0 :         Some((lsn, img))
    3542       624021 :     }
    3543              : 
    3544       227446 :     async fn get_ready_ancestor_timeline(
    3545       227446 :         &self,
    3546       227446 :         ctx: &RequestContext,
    3547       227446 :     ) -> Result<Arc<Timeline>, GetReadyAncestorError> {
    3548       227446 :         let ancestor = match self.get_ancestor_timeline() {
    3549       227446 :             Ok(timeline) => timeline,
    3550            0 :             Err(e) => return Err(GetReadyAncestorError::from(e)),
    3551              :         };
    3552              : 
    3553              :         // It's possible that the ancestor timeline isn't active yet, or
    3554              :         // is active but hasn't yet caught up to the branch point. Wait
    3555              :         // for it.
    3556              :         //
    3557              :         // This cannot happen while the pageserver is running normally,
    3558              :         // because you cannot create a branch from a point that isn't
    3559              :         // present in the pageserver yet. However, we don't wait for the
    3560              :         // branch point to be uploaded to cloud storage before creating
    3561              :         // a branch. I.e., the branch LSN need not be remote consistent
    3562              :         // for the branching operation to succeed.
    3563              :         //
    3564              :         // Hence, if we try to load a tenant in such a state where
    3565              :         // 1. the existence of the branch was persisted (in IndexPart and/or locally)
    3566              :         // 2. but the ancestor state is behind branch_lsn because it was not yet persisted
    3567              :         // then we will need to wait for the ancestor timeline to
    3568              :         // re-stream WAL up to branch_lsn before we access it.
    3569              :         //
    3570              :         // How can a tenant get in such a state?
    3571              :         // - ungraceful pageserver process exit
    3572              :         // - detach+attach => this is a bug, https://github.com/neondatabase/neon/issues/4219
    3573              :         //
    3574              :         // NB: this could be avoided by requiring
    3575              :         //   branch_lsn >= remote_consistent_lsn
    3576              :         // during branch creation.
    3577       227446 :         match ancestor.wait_to_become_active(ctx).await {
    3578       227444 :             Ok(()) => {}
    3579              :             Err(TimelineState::Stopping) => {
    3580            0 :                 return Err(GetReadyAncestorError::AncestorStopping(
    3581            0 :                     ancestor.timeline_id,
    3582            0 :                 ));
    3583              :             }
    3584            2 :             Err(state) => {
    3585            2 :                 return Err(GetReadyAncestorError::Other(anyhow::anyhow!(
    3586            2 :                     "Timeline {} will not become active. Current state: {:?}",
    3587            2 :                     ancestor.timeline_id,
    3588            2 :                     &state,
    3589            2 :                 )));
    3590              :             }
    3591              :         }
    3592       227444 :         ancestor
    3593       227444 :             .wait_lsn(self.ancestor_lsn, WaitLsnWaiter::Timeline(self), ctx)
    3594            0 :             .await
    3595       227444 :             .map_err(|e| match e {
    3596            0 :                 e @ WaitLsnError::Timeout(_) => GetReadyAncestorError::AncestorLsnTimeout(e),
    3597            0 :                 WaitLsnError::Shutdown => GetReadyAncestorError::Cancelled,
    3598            0 :                 e @ WaitLsnError::BadState => GetReadyAncestorError::Other(anyhow::anyhow!(e)),
    3599       227444 :             })?;
    3600              : 
    3601       227444 :         Ok(ancestor)
    3602       227446 :     }
    3603              : 
    3604       227446 :     pub(crate) fn get_ancestor_timeline(&self) -> anyhow::Result<Arc<Timeline>> {
    3605       227446 :         let ancestor = self.ancestor_timeline.as_ref().with_context(|| {
    3606            0 :             format!(
    3607            0 :                 "Ancestor is missing. Timeline id: {} Ancestor id {:?}",
    3608            0 :                 self.timeline_id,
    3609            0 :                 self.get_ancestor_timeline_id(),
    3610            0 :             )
    3611       227446 :         })?;
    3612       227446 :         Ok(Arc::clone(ancestor))
    3613       227446 :     }
    3614              : 
    3615         5452 :     pub(crate) fn get_shard_identity(&self) -> &ShardIdentity {
    3616         5452 :         &self.shard_identity
    3617         5452 :     }
    3618              : 
    3619              :     ///
    3620              :     /// Get a handle to the latest layer for appending.
    3621              :     ///
    3622      4804100 :     async fn get_layer_for_write(
    3623      4804100 :         &self,
    3624      4804100 :         lsn: Lsn,
    3625      4804100 :         ctx: &RequestContext,
    3626      4804100 :     ) -> anyhow::Result<Arc<InMemoryLayer>> {
    3627      4804100 :         let mut guard = self.layers.write().await;
    3628      4804100 :         let layer = guard
    3629      4804100 :             .get_layer_for_write(
    3630      4804100 :                 lsn,
    3631      4804100 :                 self.get_last_record_lsn(),
    3632      4804100 :                 self.conf,
    3633      4804100 :                 self.timeline_id,
    3634      4804100 :                 self.tenant_shard_id,
    3635      4804100 :                 ctx,
    3636      4804100 :             )
    3637          626 :             .await?;
    3638      4804100 :         Ok(layer)
    3639      4804100 :     }
    3640              : 
    3641      5279018 :     pub(crate) fn finish_write(&self, new_lsn: Lsn) {
    3642      5279018 :         assert!(new_lsn.is_aligned());
    3643              : 
    3644      5279018 :         self.metrics.last_record_gauge.set(new_lsn.0 as i64);
    3645      5279018 :         self.last_record_lsn.advance(new_lsn);
    3646      5279018 :     }
    3647              : 
    3648              :     /// Whether there was a layer to freeze or not, return the value of get_last_record_lsn
    3649              :     /// before we attempted the freeze: this guarantees that ingested data is frozen up to this lsn (inclusive).
    3650         1038 :     async fn freeze_inmem_layer(&self, write_lock_held: bool) -> Lsn {
    3651              :         // Freeze the current open in-memory layer. It will be written to disk on next
    3652              :         // iteration.
    3653              : 
    3654         1038 :         let _write_guard = if write_lock_held {
    3655            0 :             None
    3656              :         } else {
    3657         1038 :             Some(self.write_lock.lock().await)
    3658              :         };
    3659              : 
    3660         1038 :         let to_lsn = self.get_last_record_lsn();
    3661         1038 :         self.freeze_inmem_layer_at(to_lsn).await;
    3662         1038 :         to_lsn
    3663         1038 :     }
    3664              : 
    3665         1038 :     async fn freeze_inmem_layer_at(&self, at: Lsn) {
    3666         1038 :         let mut guard = self.layers.write().await;
    3667         1038 :         guard
    3668         1038 :             .try_freeze_in_memory_layer(at, &self.last_freeze_at)
    3669            4 :             .await;
    3670         1038 :     }
    3671              : 
    3672              :     /// Layer flusher task's main loop.
    3673          346 :     async fn flush_loop(
    3674          346 :         self: &Arc<Self>,
    3675          346 :         mut layer_flush_start_rx: tokio::sync::watch::Receiver<(u64, Lsn)>,
    3676          346 :         ctx: &RequestContext,
    3677          346 :     ) {
    3678          346 :         info!("started flush loop");
    3679         1038 :         loop {
    3680         1038 :             tokio::select! {
    3681              :                 _ = self.cancel.cancelled() => {
    3682              :                     info!("shutting down layer flush task due to Timeline::cancel");
    3683              :                     break;
    3684              :                 },
    3685              :                 _ = layer_flush_start_rx.changed() => {}
    3686              :             }
    3687         1038 :             trace!("waking up");
    3688         1038 :             let (flush_counter, frozen_to_lsn) = *layer_flush_start_rx.borrow();
    3689         1038 : 
    3690         1038 :             // The highest LSN to which we flushed in the loop over frozen layers
    3691         1038 :             let mut flushed_to_lsn = Lsn(0);
    3692              : 
    3693         1038 :             let result = loop {
    3694         2048 :                 if self.cancel.is_cancelled() {
    3695            0 :                     info!("dropping out of flush loop for timeline shutdown");
    3696              :                     // Note: we do not bother transmitting into [`layer_flush_done_tx`], because
    3697              :                     // anyone waiting on that will respect self.cancel as well: they will stop
    3698              :                     // waiting at the same time we as drop out of this loop.
    3699            0 :                     return;
    3700         2048 :                 }
    3701         2048 : 
    3702         2048 :                 let timer = self.metrics.flush_time_histo.start_timer();
    3703              : 
    3704         2048 :                 let layer_to_flush = {
    3705         2048 :                     let guard = self.layers.read().await;
    3706         2048 :                     guard.layer_map().frozen_layers.front().cloned()
    3707              :                     // drop 'layers' lock to allow concurrent reads and writes
    3708              :                 };
    3709         2048 :                 let Some(layer_to_flush) = layer_to_flush else {
    3710         1038 :                     break Ok(());
    3711              :                 };
    3712        58312 :                 match self.flush_frozen_layer(layer_to_flush, ctx).await {
    3713         1010 :                     Ok(this_layer_to_lsn) => {
    3714         1010 :                         flushed_to_lsn = std::cmp::max(flushed_to_lsn, this_layer_to_lsn);
    3715         1010 :                     }
    3716              :                     Err(FlushLayerError::Cancelled) => {
    3717            0 :                         info!("dropping out of flush loop for timeline shutdown");
    3718            0 :                         return;
    3719              :                     }
    3720            0 :                     err @ Err(
    3721              :                         FlushLayerError::Other(_) | FlushLayerError::CreateImageLayersError(_),
    3722              :                     ) => {
    3723            0 :                         error!("could not flush frozen layer: {err:?}");
    3724            0 :                         break err.map(|_| ());
    3725              :                     }
    3726              :                 }
    3727         1010 :                 timer.stop_and_record();
    3728              :             };
    3729              : 
    3730              :             // Unsharded tenants should never advance their LSN beyond the end of the
    3731              :             // highest layer they write: such gaps between layer data and the frozen LSN
    3732              :             // are only legal on sharded tenants.
    3733         1038 :             debug_assert!(
    3734         1038 :                 self.shard_identity.count.count() > 1
    3735         1038 :                     || flushed_to_lsn >= frozen_to_lsn
    3736           28 :                     || !flushed_to_lsn.is_valid()
    3737              :             );
    3738              : 
    3739         1038 :             if flushed_to_lsn < frozen_to_lsn && self.shard_identity.count.count() > 1 {
    3740              :                 // If our layer flushes didn't carry disk_consistent_lsn up to the `to_lsn` advertised
    3741              :                 // to us via layer_flush_start_rx, then advance it here.
    3742              :                 //
    3743              :                 // This path is only taken for tenants with multiple shards: single sharded tenants should
    3744              :                 // never encounter a gap in the wal.
    3745            0 :                 let old_disk_consistent_lsn = self.disk_consistent_lsn.load();
    3746            0 :                 tracing::debug!("Advancing disk_consistent_lsn across layer gap {old_disk_consistent_lsn}->{frozen_to_lsn}");
    3747            0 :                 if self.set_disk_consistent_lsn(frozen_to_lsn) {
    3748            0 :                     if let Err(e) = self.schedule_uploads(frozen_to_lsn, vec![]) {
    3749            0 :                         tracing::warn!("Failed to schedule metadata upload after updating disk_consistent_lsn: {e}");
    3750            0 :                     }
    3751            0 :                 }
    3752         1038 :             }
    3753              : 
    3754              :             // Notify any listeners that we're done
    3755         1038 :             let _ = self
    3756         1038 :                 .layer_flush_done_tx
    3757         1038 :                 .send_replace((flush_counter, result));
    3758              :         }
    3759            8 :     }
    3760              : 
    3761              :     /// Request the flush loop to write out all frozen layers up to `to_lsn` as Delta L0 files to disk.
    3762              :     /// The caller is responsible for the freezing, e.g., [`Self::freeze_inmem_layer`].
    3763              :     ///
    3764              :     /// `last_record_lsn` may be higher than the highest LSN of a frozen layer: if this is the case,
    3765              :     /// it means no data will be written between the top of the highest frozen layer and to_lsn,
    3766              :     /// e.g. because this tenant shard has ingested up to to_lsn and not written any data locally for that part of the WAL.
    3767         1038 :     async fn flush_frozen_layers_and_wait(&self, last_record_lsn: Lsn) -> anyhow::Result<()> {
    3768         1038 :         let mut rx = self.layer_flush_done_tx.subscribe();
    3769         1038 : 
    3770         1038 :         // Increment the flush cycle counter and wake up the flush task.
    3771         1038 :         // Remember the new value, so that when we listen for the flush
    3772         1038 :         // to finish, we know when the flush that we initiated has
    3773         1038 :         // finished, instead of some other flush that was started earlier.
    3774         1038 :         let mut my_flush_request = 0;
    3775         1038 : 
    3776         1038 :         let flush_loop_state = { *self.flush_loop_state.lock().unwrap() };
    3777         1038 :         if !matches!(flush_loop_state, FlushLoopState::Running { .. }) {
    3778            0 :             anyhow::bail!("cannot flush frozen layers when flush_loop is not running, state is {flush_loop_state:?}")
    3779         1038 :         }
    3780         1038 : 
    3781         1038 :         self.layer_flush_start_tx.send_modify(|(counter, lsn)| {
    3782         1038 :             my_flush_request = *counter + 1;
    3783         1038 :             *counter = my_flush_request;
    3784         1038 :             *lsn = std::cmp::max(last_record_lsn, *lsn);
    3785         1038 :         });
    3786              : 
    3787         2069 :         loop {
    3788         2069 :             {
    3789         2069 :                 let (last_result_counter, last_result) = &*rx.borrow();
    3790         2069 :                 if *last_result_counter >= my_flush_request {
    3791         1038 :                     if let Err(_err) = last_result {
    3792              :                         // We already logged the original error in
    3793              :                         // flush_loop. We cannot propagate it to the caller
    3794              :                         // here, because it might not be Cloneable
    3795            0 :                         anyhow::bail!(
    3796            0 :                             "Could not flush frozen layer. Request id: {}",
    3797            0 :                             my_flush_request
    3798            0 :                         );
    3799              :                     } else {
    3800         1038 :                         return Ok(());
    3801              :                     }
    3802         1031 :                 }
    3803         1031 :             }
    3804         1031 :             trace!("waiting for flush to complete");
    3805              :             tokio::select! {
    3806              :                 rx_e = rx.changed() => {
    3807              :                     rx_e?;
    3808              :                 },
    3809              :                 // Cancellation safety: we are not leaving an I/O in-flight for the flush, we're just ignoring
    3810              :                 // the notification from [`flush_loop`] that it completed.
    3811              :                 _ = self.cancel.cancelled() => {
    3812              :                     tracing::info!("Cancelled layer flush due on timeline shutdown");
    3813              :                     return Ok(())
    3814              :                 }
    3815              :             };
    3816         1031 :             trace!("done")
    3817              :         }
    3818         1038 :     }
    3819              : 
    3820            0 :     fn flush_frozen_layers(&self) {
    3821            0 :         self.layer_flush_start_tx.send_modify(|(counter, lsn)| {
    3822            0 :             *counter += 1;
    3823            0 : 
    3824            0 :             *lsn = std::cmp::max(*lsn, Lsn(self.last_freeze_at.load().0 - 1));
    3825            0 :         });
    3826            0 :     }
    3827              : 
    3828              :     /// Flush one frozen in-memory layer to disk, as a new delta layer.
    3829              :     ///
    3830              :     /// Return value is the last lsn (inclusive) of the layer that was frozen.
    3831         2020 :     #[instrument(skip_all, fields(layer=%frozen_layer))]
    3832              :     async fn flush_frozen_layer(
    3833              :         self: &Arc<Self>,
    3834              :         frozen_layer: Arc<InMemoryLayer>,
    3835              :         ctx: &RequestContext,
    3836              :     ) -> Result<Lsn, FlushLayerError> {
    3837              :         debug_assert_current_span_has_tenant_and_timeline_id();
    3838              : 
    3839              :         // As a special case, when we have just imported an image into the repository,
    3840              :         // instead of writing out a L0 delta layer, we directly write out image layer
    3841              :         // files instead. This is possible as long as *all* the data imported into the
    3842              :         // repository have the same LSN.
    3843              :         let lsn_range = frozen_layer.get_lsn_range();
    3844              : 
    3845              :         // Whether to directly create image layers for this flush, or flush them as delta layers
    3846              :         let create_image_layer =
    3847              :             lsn_range.start == self.initdb_lsn && lsn_range.end == Lsn(self.initdb_lsn.0 + 1);
    3848              : 
    3849              :         #[cfg(test)]
    3850              :         {
    3851              :             match &mut *self.flush_loop_state.lock().unwrap() {
    3852              :                 FlushLoopState::NotStarted | FlushLoopState::Exited => {
    3853              :                     panic!("flush loop not running")
    3854              :                 }
    3855              :                 FlushLoopState::Running {
    3856              :                     expect_initdb_optimization,
    3857              :                     initdb_optimization_count,
    3858              :                     ..
    3859              :                 } => {
    3860              :                     if create_image_layer {
    3861              :                         *initdb_optimization_count += 1;
    3862              :                     } else {
    3863              :                         assert!(!*expect_initdb_optimization, "expected initdb optimization");
    3864              :                     }
    3865              :                 }
    3866              :             }
    3867              :         }
    3868              : 
    3869              :         let (layers_to_upload, delta_layer_to_add) = if create_image_layer {
    3870              :             // Note: The 'ctx' in use here has DownloadBehavior::Error. We should not
    3871              :             // require downloading anything during initial import.
    3872              :             let ((rel_partition, metadata_partition), _lsn) = self
    3873              :                 .repartition(
    3874              :                     self.initdb_lsn,
    3875              :                     self.get_compaction_target_size(),
    3876              :                     EnumSet::empty(),
    3877              :                     ctx,
    3878              :                 )
    3879              :                 .await?;
    3880              : 
    3881              :             if self.cancel.is_cancelled() {
    3882              :                 return Err(FlushLayerError::Cancelled);
    3883              :             }
    3884              : 
    3885              :             // For metadata, always create delta layers.
    3886              :             let delta_layer = if !metadata_partition.parts.is_empty() {
    3887              :                 assert_eq!(
    3888              :                     metadata_partition.parts.len(),
    3889              :                     1,
    3890              :                     "currently sparse keyspace should only contain a single aux file keyspace"
    3891              :                 );
    3892              :                 let metadata_keyspace = &metadata_partition.parts[0];
    3893              :                 assert_eq!(
    3894              :                     metadata_keyspace.0.ranges.len(),
    3895              :                     1,
    3896              :                     "aux file keyspace should be a single range"
    3897              :                 );
    3898              :                 self.create_delta_layer(
    3899              :                     &frozen_layer,
    3900              :                     Some(metadata_keyspace.0.ranges[0].clone()),
    3901              :                     ctx,
    3902              :                 )
    3903              :                 .await?
    3904              :             } else {
    3905              :                 None
    3906              :             };
    3907              : 
    3908              :             // For image layers, we add them immediately into the layer map.
    3909              :             let mut layers_to_upload = Vec::new();
    3910              :             layers_to_upload.extend(
    3911              :                 self.create_image_layers(
    3912              :                     &rel_partition,
    3913              :                     self.initdb_lsn,
    3914              :                     ImageLayerCreationMode::Initial,
    3915              :                     ctx,
    3916              :                 )
    3917              :                 .await?,
    3918              :             );
    3919              : 
    3920              :             if let Some(delta_layer) = delta_layer {
    3921              :                 layers_to_upload.push(delta_layer.clone());
    3922              :                 (layers_to_upload, Some(delta_layer))
    3923              :             } else {
    3924              :                 (layers_to_upload, None)
    3925              :             }
    3926              :         } else {
    3927              :             // Normal case, write out a L0 delta layer file.
    3928              :             // `create_delta_layer` will not modify the layer map.
    3929              :             // We will remove frozen layer and add delta layer in one atomic operation later.
    3930              :             let Some(layer) = self.create_delta_layer(&frozen_layer, None, ctx).await? else {
    3931              :                 panic!("delta layer cannot be empty if no filter is applied");
    3932              :             };
    3933              :             (
    3934              :                 // FIXME: even though we have a single image and single delta layer assumption
    3935              :                 // we push them to vec
    3936              :                 vec![layer.clone()],
    3937              :                 Some(layer),
    3938              :             )
    3939              :         };
    3940              : 
    3941              :         pausable_failpoint!("flush-layer-cancel-after-writing-layer-out-pausable");
    3942              : 
    3943              :         if self.cancel.is_cancelled() {
    3944              :             return Err(FlushLayerError::Cancelled);
    3945              :         }
    3946              : 
    3947              :         let disk_consistent_lsn = Lsn(lsn_range.end.0 - 1);
    3948              : 
    3949              :         // The new on-disk layers are now in the layer map. We can remove the
    3950              :         // in-memory layer from the map now. The flushed layer is stored in
    3951              :         // the mapping in `create_delta_layer`.
    3952              :         {
    3953              :             let mut guard = self.layers.write().await;
    3954              : 
    3955              :             if self.cancel.is_cancelled() {
    3956              :                 return Err(FlushLayerError::Cancelled);
    3957              :             }
    3958              : 
    3959              :             guard.finish_flush_l0_layer(delta_layer_to_add.as_ref(), &frozen_layer, &self.metrics);
    3960              : 
    3961              :             if self.set_disk_consistent_lsn(disk_consistent_lsn) {
    3962              :                 // Schedule remote uploads that will reflect our new disk_consistent_lsn
    3963              :                 self.schedule_uploads(disk_consistent_lsn, layers_to_upload)?;
    3964              :             }
    3965              :             // release lock on 'layers'
    3966              :         };
    3967              : 
    3968              :         // FIXME: between create_delta_layer and the scheduling of the upload in `update_metadata_file`,
    3969              :         // a compaction can delete the file and then it won't be available for uploads any more.
    3970              :         // We still schedule the upload, resulting in an error, but ideally we'd somehow avoid this
    3971              :         // race situation.
    3972              :         // See https://github.com/neondatabase/neon/issues/4526
    3973              :         pausable_failpoint!("flush-frozen-pausable");
    3974              : 
    3975              :         // This failpoint is used by another test case `test_pageserver_recovery`.
    3976              :         fail_point!("flush-frozen-exit");
    3977              : 
    3978              :         Ok(Lsn(lsn_range.end.0 - 1))
    3979              :     }
    3980              : 
    3981              :     /// Return true if the value changed
    3982              :     ///
    3983              :     /// This function must only be used from the layer flush task, and may not be called concurrently.
    3984         1010 :     fn set_disk_consistent_lsn(&self, new_value: Lsn) -> bool {
    3985         1010 :         // We do a simple load/store cycle: that's why this function isn't safe for concurrent use.
    3986         1010 :         let old_value = self.disk_consistent_lsn.load();
    3987         1010 :         if new_value != old_value {
    3988         1010 :             assert!(new_value >= old_value);
    3989         1010 :             self.disk_consistent_lsn.store(new_value);
    3990         1010 :             true
    3991              :         } else {
    3992            0 :             false
    3993              :         }
    3994         1010 :     }
    3995              : 
    3996              :     /// Update metadata file
    3997         1014 :     fn schedule_uploads(
    3998         1014 :         &self,
    3999         1014 :         disk_consistent_lsn: Lsn,
    4000         1014 :         layers_to_upload: impl IntoIterator<Item = ResidentLayer>,
    4001         1014 :     ) -> anyhow::Result<()> {
    4002         1014 :         // We can only save a valid 'prev_record_lsn' value on disk if we
    4003         1014 :         // flushed *all* in-memory changes to disk. We only track
    4004         1014 :         // 'prev_record_lsn' in memory for the latest processed record, so we
    4005         1014 :         // don't remember what the correct value that corresponds to some old
    4006         1014 :         // LSN is. But if we flush everything, then the value corresponding
    4007         1014 :         // current 'last_record_lsn' is correct and we can store it on disk.
    4008         1014 :         let RecordLsn {
    4009         1014 :             last: last_record_lsn,
    4010         1014 :             prev: prev_record_lsn,
    4011         1014 :         } = self.last_record_lsn.load();
    4012         1014 :         let ondisk_prev_record_lsn = if disk_consistent_lsn == last_record_lsn {
    4013         1014 :             Some(prev_record_lsn)
    4014              :         } else {
    4015            0 :             None
    4016              :         };
    4017              : 
    4018         1014 :         let update = crate::tenant::metadata::MetadataUpdate::new(
    4019         1014 :             disk_consistent_lsn,
    4020         1014 :             ondisk_prev_record_lsn,
    4021         1014 :             *self.latest_gc_cutoff_lsn.read(),
    4022         1014 :         );
    4023         1014 : 
    4024         1014 :         fail_point!("checkpoint-before-saving-metadata", |x| bail!(
    4025            0 :             "{}",
    4026            0 :             x.unwrap()
    4027         1014 :         ));
    4028              : 
    4029         2038 :         for layer in layers_to_upload {
    4030         1024 :             self.remote_client.schedule_layer_file_upload(layer)?;
    4031              :         }
    4032         1014 :         self.remote_client
    4033         1014 :             .schedule_index_upload_for_metadata_update(&update)?;
    4034              : 
    4035         1014 :         Ok(())
    4036         1014 :     }
    4037              : 
    4038            0 :     pub(crate) async fn preserve_initdb_archive(&self) -> anyhow::Result<()> {
    4039            0 :         self.remote_client
    4040            0 :             .preserve_initdb_archive(
    4041            0 :                 &self.tenant_shard_id.tenant_id,
    4042            0 :                 &self.timeline_id,
    4043            0 :                 &self.cancel,
    4044            0 :             )
    4045            0 :             .await
    4046            0 :     }
    4047              : 
    4048              :     // Write out the given frozen in-memory layer as a new L0 delta file. This L0 file will not be tracked
    4049              :     // in layer map immediately. The caller is responsible to put it into the layer map.
    4050         1010 :     async fn create_delta_layer(
    4051         1010 :         self: &Arc<Self>,
    4052         1010 :         frozen_layer: &Arc<InMemoryLayer>,
    4053         1010 :         key_range: Option<Range<Key>>,
    4054         1010 :         ctx: &RequestContext,
    4055         1010 :     ) -> anyhow::Result<Option<ResidentLayer>> {
    4056         1010 :         let self_clone = Arc::clone(self);
    4057         1010 :         let frozen_layer = Arc::clone(frozen_layer);
    4058         1010 :         let ctx = ctx.attached_child();
    4059         1010 :         let work = async move {
    4060         1010 :             let Some(new_delta) = frozen_layer
    4061         1010 :                 .write_to_disk(&self_clone, &ctx, key_range)
    4062        85556 :                 .await?
    4063              :             else {
    4064          114 :                 return Ok(None);
    4065              :             };
    4066              :             // The write_to_disk() above calls writer.finish() which already did the fsync of the inodes.
    4067              :             // We just need to fsync the directory in which these inodes are linked,
    4068              :             // which we know to be the timeline directory.
    4069              :             //
    4070              :             // We use fatal_err() below because the after write_to_disk returns with success,
    4071              :             // the in-memory state of the filesystem already has the layer file in its final place,
    4072              :             // and subsequent pageserver code could think it's durable while it really isn't.
    4073          896 :             let timeline_dir = VirtualFile::open(
    4074          896 :                 &self_clone
    4075          896 :                     .conf
    4076          896 :                     .timeline_path(&self_clone.tenant_shard_id, &self_clone.timeline_id),
    4077          896 :                 &ctx,
    4078          896 :             )
    4079          448 :             .await
    4080          896 :             .fatal_err("VirtualFile::open for timeline dir fsync");
    4081          896 :             timeline_dir
    4082          896 :                 .sync_all()
    4083          448 :                 .await
    4084          896 :                 .fatal_err("VirtualFile::sync_all timeline dir");
    4085          896 :             anyhow::Ok(Some(new_delta))
    4086         1010 :         };
    4087              :         // Before tokio-epoll-uring, we ran write_to_disk & the sync_all inside spawn_blocking.
    4088              :         // Preserve that behavior to maintain the same behavior for `virtual_file_io_engine=std-fs`.
    4089              :         use crate::virtual_file::io_engine::IoEngine;
    4090         1010 :         match crate::virtual_file::io_engine::get() {
    4091            0 :             IoEngine::NotSet => panic!("io engine not set"),
    4092              :             IoEngine::StdFs => {
    4093          505 :                 let span = tracing::info_span!("blocking");
    4094          505 :                 tokio::task::spawn_blocking({
    4095          505 :                     move || Handle::current().block_on(work.instrument(span))
    4096          505 :                 })
    4097          505 :                 .await
    4098          505 :                 .context("spawn_blocking")
    4099          505 :                 .and_then(|x| x)
    4100              :             }
    4101              :             #[cfg(target_os = "linux")]
    4102        54366 :             IoEngine::TokioEpollUring => work.await,
    4103              :         }
    4104         1010 :     }
    4105              : 
    4106          480 :     async fn repartition(
    4107          480 :         &self,
    4108          480 :         lsn: Lsn,
    4109          480 :         partition_size: u64,
    4110          480 :         flags: EnumSet<CompactFlags>,
    4111          480 :         ctx: &RequestContext,
    4112          480 :     ) -> anyhow::Result<((KeyPartitioning, SparseKeyPartitioning), Lsn)> {
    4113          480 :         let Ok(mut partitioning_guard) = self.partitioning.try_lock() else {
    4114              :             // NB: there are two callers, one is the compaction task, of which there is only one per struct Tenant and hence Timeline.
    4115              :             // The other is the initdb optimization in flush_frozen_layer, used by `boostrap_timeline`, which runs before `.activate()`
    4116              :             // and hence before the compaction task starts.
    4117            0 :             anyhow::bail!("repartition() called concurrently, this should not happen");
    4118              :         };
    4119          480 :         let ((dense_partition, sparse_partition), partition_lsn) = &*partitioning_guard;
    4120          480 :         if lsn < *partition_lsn {
    4121            0 :             anyhow::bail!("repartition() called with LSN going backwards, this should not happen");
    4122          480 :         }
    4123          480 : 
    4124          480 :         let distance = lsn.0 - partition_lsn.0;
    4125          480 :         if *partition_lsn != Lsn(0)
    4126          262 :             && distance <= self.repartition_threshold
    4127          262 :             && !flags.contains(CompactFlags::ForceRepartition)
    4128              :         {
    4129          250 :             debug!(
    4130              :                 distance,
    4131              :                 threshold = self.repartition_threshold,
    4132            0 :                 "no repartitioning needed"
    4133              :             );
    4134          250 :             return Ok((
    4135          250 :                 (dense_partition.clone(), sparse_partition.clone()),
    4136          250 :                 *partition_lsn,
    4137          250 :             ));
    4138          230 :         }
    4139              : 
    4140        13419 :         let (dense_ks, sparse_ks) = self.collect_keyspace(lsn, ctx).await?;
    4141          230 :         let dense_partitioning = dense_ks.partition(&self.shard_identity, partition_size);
    4142          230 :         let sparse_partitioning = SparseKeyPartitioning {
    4143          230 :             parts: vec![sparse_ks],
    4144          230 :         }; // no partitioning for metadata keys for now
    4145          230 :         *partitioning_guard = ((dense_partitioning, sparse_partitioning), lsn);
    4146          230 : 
    4147          230 :         Ok((partitioning_guard.0.clone(), partitioning_guard.1))
    4148          480 :     }
    4149              : 
    4150              :     // Is it time to create a new image layer for the given partition?
    4151           14 :     async fn time_for_new_image_layer(&self, partition: &KeySpace, lsn: Lsn) -> bool {
    4152           14 :         let threshold = self.get_image_creation_threshold();
    4153              : 
    4154           14 :         let guard = self.layers.read().await;
    4155           14 :         let layers = guard.layer_map();
    4156           14 : 
    4157           14 :         let mut max_deltas = 0;
    4158           28 :         for part_range in &partition.ranges {
    4159           14 :             let image_coverage = layers.image_coverage(part_range, lsn);
    4160           28 :             for (img_range, last_img) in image_coverage {
    4161           14 :                 let img_lsn = if let Some(last_img) = last_img {
    4162            0 :                     last_img.get_lsn_range().end
    4163              :                 } else {
    4164           14 :                     Lsn(0)
    4165              :                 };
    4166              :                 // Let's consider an example:
    4167              :                 //
    4168              :                 // delta layer with LSN range 71-81
    4169              :                 // delta layer with LSN range 81-91
    4170              :                 // delta layer with LSN range 91-101
    4171              :                 // image layer at LSN 100
    4172              :                 //
    4173              :                 // If 'lsn' is still 100, i.e. no new WAL has been processed since the last image layer,
    4174              :                 // there's no need to create a new one. We check this case explicitly, to avoid passing
    4175              :                 // a bogus range to count_deltas below, with start > end. It's even possible that there
    4176              :                 // are some delta layers *later* than current 'lsn', if more WAL was processed and flushed
    4177              :                 // after we read last_record_lsn, which is passed here in the 'lsn' argument.
    4178           14 :                 if img_lsn < lsn {
    4179           14 :                     let num_deltas =
    4180           14 :                         layers.count_deltas(&img_range, &(img_lsn..lsn), Some(threshold));
    4181           14 : 
    4182           14 :                     max_deltas = max_deltas.max(num_deltas);
    4183           14 :                     if num_deltas >= threshold {
    4184            0 :                         debug!(
    4185            0 :                             "key range {}-{}, has {} deltas on this timeline in LSN range {}..{}",
    4186              :                             img_range.start, img_range.end, num_deltas, img_lsn, lsn
    4187              :                         );
    4188            0 :                         return true;
    4189           14 :                     }
    4190            0 :                 }
    4191              :             }
    4192              :         }
    4193              : 
    4194           14 :         debug!(
    4195              :             max_deltas,
    4196            0 :             "none of the partitioned ranges had >= {threshold} deltas"
    4197              :         );
    4198           14 :         false
    4199           14 :     }
    4200              : 
    4201              :     /// Create image layers for Postgres data. Assumes the caller passes a partition that is not too large,
    4202              :     /// so that at most one image layer will be produced from this function.
    4203          144 :     async fn create_image_layer_for_rel_blocks(
    4204          144 :         self: &Arc<Self>,
    4205          144 :         partition: &KeySpace,
    4206          144 :         mut image_layer_writer: ImageLayerWriter,
    4207          144 :         lsn: Lsn,
    4208          144 :         ctx: &RequestContext,
    4209          144 :         img_range: Range<Key>,
    4210          144 :         start: Key,
    4211          144 :     ) -> Result<ImageLayerCreationOutcome, CreateImageLayersError> {
    4212          144 :         let mut wrote_keys = false;
    4213          144 : 
    4214          144 :         let mut key_request_accum = KeySpaceAccum::new();
    4215          918 :         for range in &partition.ranges {
    4216          774 :             let mut key = range.start;
    4217         1800 :             while key < range.end {
    4218              :                 // Decide whether to retain this key: usually we do, but sharded tenants may
    4219              :                 // need to drop keys that don't belong to them.  If we retain the key, add it
    4220              :                 // to `key_request_accum` for later issuing a vectored get
    4221         1026 :                 if self.shard_identity.is_key_disposable(&key) {
    4222            0 :                     debug!(
    4223            0 :                         "Dropping key {} during compaction (it belongs on shard {:?})",
    4224            0 :                         key,
    4225            0 :                         self.shard_identity.get_shard_number(&key)
    4226              :                     );
    4227         1026 :                 } else {
    4228         1026 :                     key_request_accum.add_key(key);
    4229         1026 :                 }
    4230              : 
    4231         1026 :                 let last_key_in_range = key.next() == range.end;
    4232         1026 :                 key = key.next();
    4233         1026 : 
    4234         1026 :                 // Maybe flush `key_rest_accum`
    4235         1026 :                 if key_request_accum.raw_size() >= Timeline::MAX_GET_VECTORED_KEYS
    4236         1026 :                     || (last_key_in_range && key_request_accum.raw_size() > 0)
    4237              :                 {
    4238          774 :                     let results = self
    4239          774 :                         .get_vectored(key_request_accum.consume_keyspace(), lsn, ctx)
    4240           18 :                         .await?;
    4241              : 
    4242         1800 :                     for (img_key, img) in results {
    4243         1026 :                         let img = match img {
    4244         1026 :                             Ok(img) => img,
    4245            0 :                             Err(err) => {
    4246            0 :                                 // If we fail to reconstruct a VM or FSM page, we can zero the
    4247            0 :                                 // page without losing any actual user data. That seems better
    4248            0 :                                 // than failing repeatedly and getting stuck.
    4249            0 :                                 //
    4250            0 :                                 // We had a bug at one point, where we truncated the FSM and VM
    4251            0 :                                 // in the pageserver, but the Postgres didn't know about that
    4252            0 :                                 // and continued to generate incremental WAL records for pages
    4253            0 :                                 // that didn't exist in the pageserver. Trying to replay those
    4254            0 :                                 // WAL records failed to find the previous image of the page.
    4255            0 :                                 // This special case allows us to recover from that situation.
    4256            0 :                                 // See https://github.com/neondatabase/neon/issues/2601.
    4257            0 :                                 //
    4258            0 :                                 // Unfortunately we cannot do this for the main fork, or for
    4259            0 :                                 // any metadata keys, keys, as that would lead to actual data
    4260            0 :                                 // loss.
    4261            0 :                                 if is_rel_fsm_block_key(img_key) || is_rel_vm_block_key(img_key) {
    4262            0 :                                     warn!("could not reconstruct FSM or VM key {img_key}, filling with zeros: {err:?}");
    4263            0 :                                     ZERO_PAGE.clone()
    4264              :                                 } else {
    4265            0 :                                     return Err(CreateImageLayersError::PageReconstructError(err));
    4266              :                                 }
    4267              :                             }
    4268              :                         };
    4269              : 
    4270              :                         // Write all the keys we just read into our new image layer.
    4271         1091 :                         image_layer_writer.put_image(img_key, img, ctx).await?;
    4272         1026 :                         wrote_keys = true;
    4273              :                     }
    4274          252 :                 }
    4275              :             }
    4276              :         }
    4277              : 
    4278          144 :         if wrote_keys {
    4279              :             // Normal path: we have written some data into the new image layer for this
    4280              :             // partition, so flush it to disk.
    4281          289 :             let image_layer = image_layer_writer.finish(self, ctx).await?;
    4282          144 :             Ok(ImageLayerCreationOutcome {
    4283          144 :                 image: Some(image_layer),
    4284          144 :                 next_start_key: img_range.end,
    4285          144 :             })
    4286              :         } else {
    4287              :             // Special case: the image layer may be empty if this is a sharded tenant and the
    4288              :             // partition does not cover any keys owned by this shard.  In this case, to ensure
    4289              :             // we don't leave gaps between image layers, leave `start` where it is, so that the next
    4290              :             // layer we write will cover the key range that we just scanned.
    4291            0 :             tracing::debug!("no data in range {}-{}", img_range.start, img_range.end);
    4292            0 :             Ok(ImageLayerCreationOutcome {
    4293            0 :                 image: None,
    4294            0 :                 next_start_key: start,
    4295            0 :             })
    4296              :         }
    4297          144 :     }
    4298              : 
    4299              :     /// Create an image layer for metadata keys. This function produces one image layer for all metadata
    4300              :     /// keys for now. Because metadata keys cannot exceed basebackup size limit, the image layer for it
    4301              :     /// would not be too large to fit in a single image layer.
    4302              :     #[allow(clippy::too_many_arguments)]
    4303          364 :     async fn create_image_layer_for_metadata_keys(
    4304          364 :         self: &Arc<Self>,
    4305          364 :         partition: &KeySpace,
    4306          364 :         mut image_layer_writer: ImageLayerWriter,
    4307          364 :         lsn: Lsn,
    4308          364 :         ctx: &RequestContext,
    4309          364 :         img_range: Range<Key>,
    4310          364 :         mode: ImageLayerCreationMode,
    4311          364 :     ) -> Result<ImageLayerCreationOutcome, CreateImageLayersError> {
    4312          364 :         assert!(!matches!(mode, ImageLayerCreationMode::Initial));
    4313              : 
    4314              :         // Metadata keys image layer creation.
    4315          364 :         let mut reconstruct_state = ValuesReconstructState::default();
    4316          364 :         let data = self
    4317          364 :             .get_vectored_impl(partition.clone(), lsn, &mut reconstruct_state, ctx)
    4318         4129 :             .await?;
    4319          364 :         let (data, total_kb_retrieved, total_key_retrieved) = {
    4320          364 :             let mut new_data = BTreeMap::new();
    4321          364 :             let mut total_kb_retrieved = 0;
    4322          364 :             let mut total_key_retrieved = 0;
    4323        48368 :             for (k, v) in data {
    4324        48004 :                 let v = v.map_err(CreateImageLayersError::PageReconstructError)?;
    4325        48004 :                 total_kb_retrieved += KEY_SIZE + v.len();
    4326        48004 :                 total_key_retrieved += 1;
    4327        48004 :                 new_data.insert(k, v);
    4328              :             }
    4329          364 :             (new_data, total_kb_retrieved / 1024, total_key_retrieved)
    4330          364 :         };
    4331          364 :         let delta_file_accessed = reconstruct_state.get_delta_layers_visited();
    4332          364 : 
    4333          364 :         let trigger_generation = delta_file_accessed as usize >= MAX_AUX_FILE_V2_DELTAS;
    4334          364 :         info!(
    4335            0 :             "generate image layers for metadata keys: trigger_generation={trigger_generation}, \
    4336            0 :                 delta_file_accessed={delta_file_accessed}, total_kb_retrieved={total_kb_retrieved}, \
    4337            0 :                 total_key_retrieved={total_key_retrieved}"
    4338              :         );
    4339          364 :         if !trigger_generation && mode == ImageLayerCreationMode::Try {
    4340          350 :             return Ok(ImageLayerCreationOutcome {
    4341          350 :                 image: None,
    4342          350 :                 next_start_key: img_range.end,
    4343          350 :             });
    4344           14 :         }
    4345           14 :         let has_keys = !data.is_empty();
    4346        10018 :         for (k, v) in data {
    4347              :             // Even if the value is empty (deleted), we do not delete it for now until we can ensure vectored get
    4348              :             // considers this situation properly.
    4349              :             // if v.is_empty() {
    4350              :             //     continue;
    4351              :             // }
    4352              : 
    4353              :             // No need to handle sharding b/c metadata keys are always on the 0-th shard.
    4354              : 
    4355              :             // TODO: split image layers to avoid too large layer files. Too large image files are not handled
    4356              :             // on the normal data path either.
    4357        10160 :             image_layer_writer.put_image(k, v, ctx).await?;
    4358              :         }
    4359              :         Ok(ImageLayerCreationOutcome {
    4360           14 :             image: if has_keys {
    4361           29 :                 let image_layer = image_layer_writer.finish(self, ctx).await?;
    4362           14 :                 Some(image_layer)
    4363              :             } else {
    4364            0 :                 tracing::debug!("no data in range {}-{}", img_range.start, img_range.end);
    4365            0 :                 None
    4366              :             },
    4367           14 :             next_start_key: img_range.end,
    4368              :         })
    4369          364 :     }
    4370              : 
    4371          960 :     #[tracing::instrument(skip_all, fields(%lsn, %mode))]
    4372              :     async fn create_image_layers(
    4373              :         self: &Arc<Timeline>,
    4374              :         partitioning: &KeyPartitioning,
    4375              :         lsn: Lsn,
    4376              :         mode: ImageLayerCreationMode,
    4377              :         ctx: &RequestContext,
    4378              :     ) -> Result<Vec<ResidentLayer>, CreateImageLayersError> {
    4379              :         let timer = self.metrics.create_images_time_histo.start_timer();
    4380              :         let mut image_layers = Vec::new();
    4381              : 
    4382              :         // We need to avoid holes between generated image layers.
    4383              :         // Otherwise LayerMap::image_layer_exists will return false if key range of some layer is covered by more than one
    4384              :         // image layer with hole between them. In this case such layer can not be utilized by GC.
    4385              :         //
    4386              :         // How such hole between partitions can appear?
    4387              :         // if we have relation with relid=1 and size 100 and relation with relid=2 with size 200 then result of
    4388              :         // KeySpace::partition may contain partitions <100000000..100000099> and <200000000..200000199>.
    4389              :         // If there is delta layer <100000000..300000000> then it never be garbage collected because
    4390              :         // image layers  <100000000..100000099> and <200000000..200000199> are not completely covering it.
    4391              :         let mut start = Key::MIN;
    4392              : 
    4393              :         let check_for_image_layers = {
    4394              :             let last_checks_at = self.last_image_layer_creation_check_at.load();
    4395              :             let distance = lsn
    4396              :                 .checked_sub(last_checks_at)
    4397              :                 .expect("Attempt to compact with LSN going backwards");
    4398              :             let min_distance = self.get_image_layer_creation_check_threshold() as u64
    4399              :                 * self.get_checkpoint_distance();
    4400              : 
    4401              :             // Skip the expensive delta layer counting if this timeline has not ingested sufficient
    4402              :             // WAL since the last check.
    4403              :             distance.0 >= min_distance
    4404              :         };
    4405              : 
    4406              :         if check_for_image_layers {
    4407              :             self.last_image_layer_creation_check_at.store(lsn);
    4408              :         }
    4409              : 
    4410              :         for partition in partitioning.parts.iter() {
    4411              :             let img_range = start..partition.ranges.last().unwrap().end;
    4412              :             let compact_metadata = partition.overlaps(&Key::metadata_key_range());
    4413              :             if compact_metadata {
    4414              :                 for range in &partition.ranges {
    4415              :                     assert!(
    4416              :                         range.start.field1 >= METADATA_KEY_BEGIN_PREFIX
    4417              :                             && range.end.field1 <= METADATA_KEY_END_PREFIX,
    4418              :                         "metadata keys must be partitioned separately"
    4419              :                     );
    4420              :                 }
    4421              :                 if mode == ImageLayerCreationMode::Initial {
    4422              :                     return Err(CreateImageLayersError::Other(anyhow::anyhow!("no image layer should be created for metadata keys when flushing frozen layers")));
    4423              :                 }
    4424              :             } else if let ImageLayerCreationMode::Try = mode {
    4425              :                 // check_for_image_layers = false -> skip
    4426              :                 // check_for_image_layers = true -> check time_for_new_image_layer -> skip/generate
    4427              :                 if !check_for_image_layers || !self.time_for_new_image_layer(partition, lsn).await {
    4428              :                     start = img_range.end;
    4429              :                     continue;
    4430              :                 }
    4431              :             }
    4432              : 
    4433              :             let image_layer_writer = ImageLayerWriter::new(
    4434              :                 self.conf,
    4435              :                 self.timeline_id,
    4436              :                 self.tenant_shard_id,
    4437              :                 &img_range,
    4438              :                 lsn,
    4439              :                 ctx,
    4440              :             )
    4441              :             .await?;
    4442              : 
    4443            0 :             fail_point!("image-layer-writer-fail-before-finish", |_| {
    4444            0 :                 Err(CreateImageLayersError::Other(anyhow::anyhow!(
    4445            0 :                     "failpoint image-layer-writer-fail-before-finish"
    4446            0 :                 )))
    4447            0 :             });
    4448              : 
    4449              :             if !compact_metadata {
    4450              :                 let ImageLayerCreationOutcome {
    4451              :                     image,
    4452              :                     next_start_key,
    4453              :                 } = self
    4454              :                     .create_image_layer_for_rel_blocks(
    4455              :                         partition,
    4456              :                         image_layer_writer,
    4457              :                         lsn,
    4458              :                         ctx,
    4459              :                         img_range,
    4460              :                         start,
    4461              :                     )
    4462              :                     .await?;
    4463              : 
    4464              :                 start = next_start_key;
    4465              :                 image_layers.extend(image);
    4466              :             } else {
    4467              :                 let ImageLayerCreationOutcome {
    4468              :                     image,
    4469              :                     next_start_key,
    4470              :                 } = self
    4471              :                     .create_image_layer_for_metadata_keys(
    4472              :                         partition,
    4473              :                         image_layer_writer,
    4474              :                         lsn,
    4475              :                         ctx,
    4476              :                         img_range,
    4477              :                         mode,
    4478              :                     )
    4479              :                     .await?;
    4480              :                 start = next_start_key;
    4481              :                 image_layers.extend(image);
    4482              :             }
    4483              :         }
    4484              : 
    4485              :         // The writer.finish() above already did the fsync of the inodes.
    4486              :         // We just need to fsync the directory in which these inodes are linked,
    4487              :         // which we know to be the timeline directory.
    4488              :         if !image_layers.is_empty() {
    4489              :             // We use fatal_err() below because the after writer.finish() returns with success,
    4490              :             // the in-memory state of the filesystem already has the layer file in its final place,
    4491              :             // and subsequent pageserver code could think it's durable while it really isn't.
    4492              :             let timeline_dir = VirtualFile::open(
    4493              :                 &self
    4494              :                     .conf
    4495              :                     .timeline_path(&self.tenant_shard_id, &self.timeline_id),
    4496              :                 ctx,
    4497              :             )
    4498              :             .await
    4499              :             .fatal_err("VirtualFile::open for timeline dir fsync");
    4500              :             timeline_dir
    4501              :                 .sync_all()
    4502              :                 .await
    4503              :                 .fatal_err("VirtualFile::sync_all timeline dir");
    4504              :         }
    4505              : 
    4506              :         let mut guard = self.layers.write().await;
    4507              : 
    4508              :         // FIXME: we could add the images to be uploaded *before* returning from here, but right
    4509              :         // now they are being scheduled outside of write lock
    4510              :         guard.track_new_image_layers(&image_layers, &self.metrics);
    4511              :         drop_wlock(guard);
    4512              :         timer.stop_and_record();
    4513              : 
    4514              :         Ok(image_layers)
    4515              :     }
    4516              : 
    4517              :     /// Wait until the background initial logical size calculation is complete, or
    4518              :     /// this Timeline is shut down.  Calling this function will cause the initial
    4519              :     /// logical size calculation to skip waiting for the background jobs barrier.
    4520            0 :     pub(crate) async fn await_initial_logical_size(self: Arc<Self>) {
    4521            0 :         if !self.shard_identity.is_shard_zero() {
    4522              :             // We don't populate logical size on shard >0: skip waiting for it.
    4523            0 :             return;
    4524            0 :         }
    4525            0 : 
    4526            0 :         if self.remote_client.is_deleting() {
    4527              :             // The timeline was created in a deletion-resume state, we don't expect logical size to be populated
    4528            0 :             return;
    4529            0 :         }
    4530              : 
    4531            0 :         if let Some(await_bg_cancel) = self
    4532            0 :             .current_logical_size
    4533            0 :             .cancel_wait_for_background_loop_concurrency_limit_semaphore
    4534            0 :             .get()
    4535            0 :         {
    4536            0 :             await_bg_cancel.cancel();
    4537            0 :         } else {
    4538              :             // We should not wait if we were not able to explicitly instruct
    4539              :             // the logical size cancellation to skip the concurrency limit semaphore.
    4540              :             // TODO: this is an unexpected case.  We should restructure so that it
    4541              :             // can't happen.
    4542            0 :             tracing::warn!(
    4543            0 :                 "await_initial_logical_size: can't get semaphore cancel token, skipping"
    4544              :             );
    4545            0 :             debug_assert!(false);
    4546              :         }
    4547              : 
    4548              :         tokio::select!(
    4549              :             _ = self.current_logical_size.initialized.acquire() => {},
    4550              :             _ = self.cancel.cancelled() => {}
    4551              :         )
    4552            0 :     }
    4553              : 
    4554              :     /// Detach this timeline from its ancestor by copying all of ancestors layers as this
    4555              :     /// Timelines layers up to the ancestor_lsn.
    4556              :     ///
    4557              :     /// Requires a timeline that:
    4558              :     /// - has an ancestor to detach from
    4559              :     /// - the ancestor does not have an ancestor -- follows from the original RFC limitations, not
    4560              :     /// a technical requirement
    4561              :     ///
    4562              :     /// After the operation has been started, it cannot be canceled. Upon restart it needs to be
    4563              :     /// polled again until completion.
    4564              :     ///
    4565              :     /// During the operation all timelines sharing the data with this timeline will be reparented
    4566              :     /// from our ancestor to be branches of this timeline.
    4567            0 :     pub(crate) async fn prepare_to_detach_from_ancestor(
    4568            0 :         self: &Arc<Timeline>,
    4569            0 :         tenant: &crate::tenant::Tenant,
    4570            0 :         options: detach_ancestor::Options,
    4571            0 :         ctx: &RequestContext,
    4572            0 :     ) -> Result<
    4573            0 :         (
    4574            0 :             completion::Completion,
    4575            0 :             detach_ancestor::PreparedTimelineDetach,
    4576            0 :         ),
    4577            0 :         detach_ancestor::Error,
    4578            0 :     > {
    4579            0 :         detach_ancestor::prepare(self, tenant, options, ctx).await
    4580            0 :     }
    4581              : 
    4582              :     /// Completes the ancestor detach. This method is to be called while holding the
    4583              :     /// TenantManager's tenant slot, so during this method we cannot be deleted nor can any
    4584              :     /// timeline be deleted. After this method returns successfully, tenant must be reloaded.
    4585              :     ///
    4586              :     /// Pageserver receiving a SIGKILL during this operation is not supported (yet).
    4587            0 :     pub(crate) async fn complete_detaching_timeline_ancestor(
    4588            0 :         self: &Arc<Timeline>,
    4589            0 :         tenant: &crate::tenant::Tenant,
    4590            0 :         prepared: detach_ancestor::PreparedTimelineDetach,
    4591            0 :         ctx: &RequestContext,
    4592            0 :     ) -> Result<Vec<TimelineId>, anyhow::Error> {
    4593            0 :         detach_ancestor::complete(self, tenant, prepared, ctx).await
    4594            0 :     }
    4595              : }
    4596              : 
    4597              : /// Top-level failure to compact.
    4598            0 : #[derive(Debug, thiserror::Error)]
    4599              : pub(crate) enum CompactionError {
    4600              :     #[error("The timeline or pageserver is shutting down")]
    4601              :     ShuttingDown,
    4602              :     /// Compaction cannot be done right now; page reconstruction and so on.
    4603              :     #[error(transparent)]
    4604              :     Other(#[from] anyhow::Error),
    4605              : }
    4606              : 
    4607              : impl From<CollectKeySpaceError> for CompactionError {
    4608            0 :     fn from(err: CollectKeySpaceError) -> Self {
    4609            0 :         match err {
    4610              :             CollectKeySpaceError::Cancelled
    4611              :             | CollectKeySpaceError::PageRead(PageReconstructError::Cancelled) => {
    4612            0 :                 CompactionError::ShuttingDown
    4613              :             }
    4614            0 :             e => CompactionError::Other(e.into()),
    4615              :         }
    4616            0 :     }
    4617              : }
    4618              : 
    4619              : #[serde_as]
    4620          196 : #[derive(serde::Serialize)]
    4621              : struct RecordedDuration(#[serde_as(as = "serde_with::DurationMicroSeconds")] Duration);
    4622              : 
    4623              : #[derive(Default)]
    4624              : enum DurationRecorder {
    4625              :     #[default]
    4626              :     NotStarted,
    4627              :     Recorded(RecordedDuration, tokio::time::Instant),
    4628              : }
    4629              : 
    4630              : impl DurationRecorder {
    4631          506 :     fn till_now(&self) -> DurationRecorder {
    4632          506 :         match self {
    4633              :             DurationRecorder::NotStarted => {
    4634            0 :                 panic!("must only call on recorded measurements")
    4635              :             }
    4636          506 :             DurationRecorder::Recorded(_, ended) => {
    4637          506 :                 let now = tokio::time::Instant::now();
    4638          506 :                 DurationRecorder::Recorded(RecordedDuration(now - *ended), now)
    4639          506 :             }
    4640          506 :         }
    4641          506 :     }
    4642          196 :     fn into_recorded(self) -> Option<RecordedDuration> {
    4643          196 :         match self {
    4644            0 :             DurationRecorder::NotStarted => None,
    4645          196 :             DurationRecorder::Recorded(recorded, _) => Some(recorded),
    4646              :         }
    4647          196 :     }
    4648              : }
    4649              : 
    4650              : impl Timeline {
    4651           28 :     async fn finish_compact_batch(
    4652           28 :         self: &Arc<Self>,
    4653           28 :         new_deltas: &[ResidentLayer],
    4654           28 :         new_images: &[ResidentLayer],
    4655           28 :         layers_to_remove: &[Layer],
    4656           28 :     ) -> anyhow::Result<()> {
    4657           28 :         let mut guard = self.layers.write().await;
    4658              : 
    4659           28 :         let mut duplicated_layers = HashSet::new();
    4660           28 : 
    4661           28 :         let mut insert_layers = Vec::with_capacity(new_deltas.len());
    4662              : 
    4663          256 :         for l in new_deltas {
    4664          228 :             if guard.contains(l.as_ref()) {
    4665              :                 // expected in tests
    4666            0 :                 tracing::error!(layer=%l, "duplicated L1 layer");
    4667              : 
    4668              :                 // good ways to cause a duplicate: we repeatedly error after taking the writelock
    4669              :                 // `guard`  on self.layers. as of writing this, there are no error returns except
    4670              :                 // for compact_level0_phase1 creating an L0, which does not happen in practice
    4671              :                 // because we have not implemented L0 => L0 compaction.
    4672            0 :                 duplicated_layers.insert(l.layer_desc().key());
    4673          228 :             } else if LayerMap::is_l0(l.layer_desc()) {
    4674            0 :                 bail!("compaction generates a L0 layer file as output, which will cause infinite compaction.");
    4675          228 :             } else {
    4676          228 :                 insert_layers.push(l.clone());
    4677          228 :             }
    4678              :         }
    4679              : 
    4680              :         // only remove those inputs which were not outputs
    4681           28 :         let remove_layers: Vec<Layer> = layers_to_remove
    4682           28 :             .iter()
    4683          322 :             .filter(|l| !duplicated_layers.contains(&l.layer_desc().key()))
    4684           28 :             .cloned()
    4685           28 :             .collect();
    4686           28 : 
    4687           28 :         if !new_images.is_empty() {
    4688            0 :             guard.track_new_image_layers(new_images, &self.metrics);
    4689           28 :         }
    4690              : 
    4691              :         // deletion will happen later, the layer file manager calls garbage_collect_on_drop
    4692           28 :         guard.finish_compact_l0(&remove_layers, &insert_layers, &self.metrics);
    4693           28 : 
    4694           28 :         self.remote_client
    4695           28 :             .schedule_compaction_update(&remove_layers, new_deltas)?;
    4696              : 
    4697           28 :         drop_wlock(guard);
    4698           28 : 
    4699           28 :         Ok(())
    4700           28 :     }
    4701              : 
    4702            0 :     async fn rewrite_layers(
    4703            0 :         self: &Arc<Self>,
    4704            0 :         replace_layers: Vec<(Layer, ResidentLayer)>,
    4705            0 :         drop_layers: Vec<Layer>,
    4706            0 :     ) -> anyhow::Result<()> {
    4707            0 :         let mut guard = self.layers.write().await;
    4708              : 
    4709            0 :         guard.rewrite_layers(&replace_layers, &drop_layers, &self.metrics);
    4710            0 : 
    4711            0 :         let upload_layers: Vec<_> = replace_layers.into_iter().map(|r| r.1).collect();
    4712            0 : 
    4713            0 :         self.remote_client
    4714            0 :             .schedule_compaction_update(&drop_layers, &upload_layers)?;
    4715              : 
    4716            0 :         Ok(())
    4717            0 :     }
    4718              : 
    4719              :     /// Schedules the uploads of the given image layers
    4720          366 :     fn upload_new_image_layers(
    4721          366 :         self: &Arc<Self>,
    4722          366 :         new_images: impl IntoIterator<Item = ResidentLayer>,
    4723          366 :     ) -> anyhow::Result<()> {
    4724          396 :         for layer in new_images {
    4725           30 :             self.remote_client.schedule_layer_file_upload(layer)?;
    4726              :         }
    4727              :         // should any new image layer been created, not uploading index_part will
    4728              :         // result in a mismatch between remote_physical_size and layermap calculated
    4729              :         // size, which will fail some tests, but should not be an issue otherwise.
    4730          366 :         self.remote_client
    4731          366 :             .schedule_index_upload_for_file_changes()?;
    4732          366 :         Ok(())
    4733          366 :     }
    4734              : 
    4735              :     /// Find the Lsns above which layer files need to be retained on
    4736              :     /// garbage collection. This is separate from actually performing the GC,
    4737              :     /// and is updated more frequently, so that compaction can remove obsolete
    4738              :     /// page versions more aggressively.
    4739              :     ///
    4740              :     /// TODO: that's wishful thinking, compaction doesn't actually do that
    4741              :     /// currently.
    4742              :     ///
    4743              :     /// The 'cutoff_horizon' point is used to retain recent versions that might still be
    4744              :     /// needed by read-only nodes. (As of this writing, the caller just passes
    4745              :     /// the latest LSN subtracted by a constant, and doesn't do anything smart
    4746              :     /// to figure out what read-only nodes might actually need.)
    4747              :     ///
    4748              :     /// The 'pitr' duration is used to calculate a 'pitr_cutoff', which can be used to determine
    4749              :     /// whether a record is needed for PITR.
    4750         1512 :     #[instrument(skip_all, fields(timeline_id=%self.timeline_id))]
    4751              :     pub(super) async fn find_gc_cutoffs(
    4752              :         &self,
    4753              :         cutoff_horizon: Lsn,
    4754              :         pitr: Duration,
    4755              :         cancel: &CancellationToken,
    4756              :         ctx: &RequestContext,
    4757              :     ) -> anyhow::Result<GcCutoffs> {
    4758              :         let _timer = self
    4759              :             .metrics
    4760              :             .find_gc_cutoffs_histo
    4761              :             .start_timer()
    4762              :             .record_on_drop();
    4763              : 
    4764              :         pausable_failpoint!("Timeline::find_gc_cutoffs-pausable");
    4765              : 
    4766              :         // First, calculate pitr_cutoff_timestamp and then convert it to LSN.
    4767              :         //
    4768              :         // Some unit tests depend on garbage-collection working even when
    4769              :         // CLOG data is missing, so that find_lsn_for_timestamp() doesn't
    4770              :         // work, so avoid calling it altogether if time-based retention is not
    4771              :         // configured. It would be pointless anyway.
    4772              :         let pitr_cutoff = if pitr != Duration::ZERO {
    4773              :             let now = SystemTime::now();
    4774              :             if let Some(pitr_cutoff_timestamp) = now.checked_sub(pitr) {
    4775              :                 let pitr_timestamp = to_pg_timestamp(pitr_cutoff_timestamp);
    4776              : 
    4777              :                 match self
    4778              :                     .find_lsn_for_timestamp(pitr_timestamp, cancel, ctx)
    4779              :                     .await?
    4780              :                 {
    4781              :                     LsnForTimestamp::Present(lsn) => lsn,
    4782              :                     LsnForTimestamp::Future(lsn) => {
    4783              :                         // The timestamp is in the future. That sounds impossible,
    4784              :                         // but what it really means is that there hasn't been
    4785              :                         // any commits since the cutoff timestamp.
    4786              :                         //
    4787              :                         // In this case we should use the LSN of the most recent commit,
    4788              :                         // which is implicitly the last LSN in the log.
    4789              :                         debug!("future({})", lsn);
    4790              :                         self.get_last_record_lsn()
    4791              :                     }
    4792              :                     LsnForTimestamp::Past(lsn) => {
    4793              :                         debug!("past({})", lsn);
    4794              :                         // conservative, safe default is to remove nothing, when we
    4795              :                         // have no commit timestamp data available
    4796              :                         *self.get_latest_gc_cutoff_lsn()
    4797              :                     }
    4798              :                     LsnForTimestamp::NoData(lsn) => {
    4799              :                         debug!("nodata({})", lsn);
    4800              :                         // conservative, safe default is to remove nothing, when we
    4801              :                         // have no commit timestamp data available
    4802              :                         *self.get_latest_gc_cutoff_lsn()
    4803              :                     }
    4804              :                 }
    4805              :             } else {
    4806              :                 // If we don't have enough data to convert to LSN,
    4807              :                 // play safe and don't remove any layers.
    4808              :                 *self.get_latest_gc_cutoff_lsn()
    4809              :             }
    4810              :         } else {
    4811              :             // No time-based retention was configured. Interpret this as "keep no history".
    4812              :             self.get_last_record_lsn()
    4813              :         };
    4814              : 
    4815              :         Ok(GcCutoffs {
    4816              :             horizon: cutoff_horizon,
    4817              :             pitr: pitr_cutoff,
    4818              :         })
    4819              :     }
    4820              : 
    4821              :     /// Garbage collect layer files on a timeline that are no longer needed.
    4822              :     ///
    4823              :     /// Currently, we don't make any attempt at removing unneeded page versions
    4824              :     /// within a layer file. We can only remove the whole file if it's fully
    4825              :     /// obsolete.
    4826          756 :     pub(super) async fn gc(&self) -> anyhow::Result<GcResult> {
    4827              :         // this is most likely the background tasks, but it might be the spawned task from
    4828              :         // immediate_gc
    4829          755 :         let _g = tokio::select! {
    4830              :             guard = self.gc_lock.lock() => guard,
    4831              :             _ = self.cancel.cancelled() => return Ok(GcResult::default()),
    4832              :         };
    4833          755 :         let timer = self.metrics.garbage_collect_histo.start_timer();
    4834              : 
    4835              :         fail_point!("before-timeline-gc");
    4836              : 
    4837              :         // Is the timeline being deleted?
    4838          755 :         if self.is_stopping() {
    4839            0 :             anyhow::bail!("timeline is Stopping");
    4840          755 :         }
    4841          755 : 
    4842          755 :         let (horizon_cutoff, pitr_cutoff, retain_lsns) = {
    4843          755 :             let gc_info = self.gc_info.read().unwrap();
    4844          755 : 
    4845          755 :             let horizon_cutoff = min(gc_info.cutoffs.horizon, self.get_disk_consistent_lsn());
    4846          755 :             let pitr_cutoff = gc_info.cutoffs.pitr;
    4847          755 :             let retain_lsns = gc_info.retain_lsns.clone();
    4848          755 :             (horizon_cutoff, pitr_cutoff, retain_lsns)
    4849          755 :         };
    4850          755 : 
    4851          755 :         let mut new_gc_cutoff = Lsn::min(horizon_cutoff, pitr_cutoff);
    4852          755 :         let standby_horizon = self.standby_horizon.load();
    4853          755 :         // Hold GC for the standby, but as a safety guard do it only within some
    4854          755 :         // reasonable lag.
    4855          755 :         if standby_horizon != Lsn::INVALID {
    4856            0 :             if let Some(standby_lag) = new_gc_cutoff.checked_sub(standby_horizon) {
    4857              :                 const MAX_ALLOWED_STANDBY_LAG: u64 = 10u64 << 30; // 10 GB
    4858            0 :                 if standby_lag.0 < MAX_ALLOWED_STANDBY_LAG {
    4859            0 :                     new_gc_cutoff = Lsn::min(standby_horizon, new_gc_cutoff);
    4860            0 :                     trace!("holding off GC for standby apply LSN {}", standby_horizon);
    4861              :                 } else {
    4862            0 :                     warn!(
    4863            0 :                         "standby is lagging for more than {}MB, not holding gc for it",
    4864            0 :                         MAX_ALLOWED_STANDBY_LAG / 1024 / 1024
    4865              :                     )
    4866              :                 }
    4867            0 :             }
    4868          755 :         }
    4869              : 
    4870              :         // Reset standby horizon to ignore it if it is not updated till next GC.
    4871              :         // It is an easy way to unset it when standby disappears without adding
    4872              :         // more conf options.
    4873          755 :         self.standby_horizon.store(Lsn::INVALID);
    4874          755 :         self.metrics
    4875          755 :             .standby_horizon_gauge
    4876          755 :             .set(Lsn::INVALID.0 as i64);
    4877              : 
    4878          755 :         let res = self
    4879          755 :             .gc_timeline(horizon_cutoff, pitr_cutoff, retain_lsns, new_gc_cutoff)
    4880          755 :             .instrument(
    4881          755 :                 info_span!("gc_timeline", timeline_id = %self.timeline_id, cutoff = %new_gc_cutoff),
    4882              :             )
    4883            0 :             .await?;
    4884              : 
    4885              :         // only record successes
    4886          755 :         timer.stop_and_record();
    4887          755 : 
    4888          755 :         Ok(res)
    4889          756 :     }
    4890              : 
    4891          755 :     async fn gc_timeline(
    4892          755 :         &self,
    4893          755 :         horizon_cutoff: Lsn,
    4894          755 :         pitr_cutoff: Lsn,
    4895          755 :         retain_lsns: Vec<Lsn>,
    4896          755 :         new_gc_cutoff: Lsn,
    4897          755 :     ) -> anyhow::Result<GcResult> {
    4898          755 :         // FIXME: if there is an ongoing detach_from_ancestor, we should just skip gc
    4899          755 : 
    4900          755 :         let now = SystemTime::now();
    4901          755 :         let mut result: GcResult = GcResult::default();
    4902          755 : 
    4903          755 :         // Nothing to GC. Return early.
    4904          755 :         let latest_gc_cutoff = *self.get_latest_gc_cutoff_lsn();
    4905          755 :         if latest_gc_cutoff >= new_gc_cutoff {
    4906           22 :             info!(
    4907            0 :                 "Nothing to GC: new_gc_cutoff_lsn {new_gc_cutoff}, latest_gc_cutoff_lsn {latest_gc_cutoff}",
    4908              :             );
    4909           22 :             return Ok(result);
    4910          733 :         }
    4911              : 
    4912              :         // We need to ensure that no one tries to read page versions or create
    4913              :         // branches at a point before latest_gc_cutoff_lsn. See branch_timeline()
    4914              :         // for details. This will block until the old value is no longer in use.
    4915              :         //
    4916              :         // The GC cutoff should only ever move forwards.
    4917          733 :         let waitlist = {
    4918          733 :             let write_guard = self.latest_gc_cutoff_lsn.lock_for_write();
    4919          733 :             ensure!(
    4920          733 :                 *write_guard <= new_gc_cutoff,
    4921            0 :                 "Cannot move GC cutoff LSN backwards (was {}, new {})",
    4922            0 :                 *write_guard,
    4923              :                 new_gc_cutoff
    4924              :             );
    4925          733 :             write_guard.store_and_unlock(new_gc_cutoff)
    4926          733 :         };
    4927          733 :         waitlist.wait().await;
    4928              : 
    4929          733 :         info!("GC starting");
    4930              : 
    4931          733 :         debug!("retain_lsns: {:?}", retain_lsns);
    4932              : 
    4933          733 :         let mut layers_to_remove = Vec::new();
    4934              : 
    4935              :         // Scan all layers in the timeline (remote or on-disk).
    4936              :         //
    4937              :         // Garbage collect the layer if all conditions are satisfied:
    4938              :         // 1. it is older than cutoff LSN;
    4939              :         // 2. it is older than PITR interval;
    4940              :         // 3. it doesn't need to be retained for 'retain_lsns';
    4941              :         // 4. newer on-disk image layers cover the layer's whole key range
    4942              :         //
    4943              :         // TODO holding a write lock is too agressive and avoidable
    4944          733 :         let mut guard = self.layers.write().await;
    4945          733 :         let layers = guard.layer_map();
    4946        12411 :         'outer: for l in layers.iter_historic_layers() {
    4947        12411 :             result.layers_total += 1;
    4948        12411 : 
    4949        12411 :             // 1. Is it newer than GC horizon cutoff point?
    4950        12411 :             if l.get_lsn_range().end > horizon_cutoff {
    4951          753 :                 debug!(
    4952            0 :                     "keeping {} because it's newer than horizon_cutoff {}",
    4953            0 :                     l.layer_name(),
    4954              :                     horizon_cutoff,
    4955              :                 );
    4956          753 :                 result.layers_needed_by_cutoff += 1;
    4957          753 :                 continue 'outer;
    4958        11658 :             }
    4959        11658 : 
    4960        11658 :             // 2. It is newer than PiTR cutoff point?
    4961        11658 :             if l.get_lsn_range().end > pitr_cutoff {
    4962            0 :                 debug!(
    4963            0 :                     "keeping {} because it's newer than pitr_cutoff {}",
    4964            0 :                     l.layer_name(),
    4965              :                     pitr_cutoff,
    4966              :                 );
    4967            0 :                 result.layers_needed_by_pitr += 1;
    4968            0 :                 continue 'outer;
    4969        11658 :             }
    4970              : 
    4971              :             // 3. Is it needed by a child branch?
    4972              :             // NOTE With that we would keep data that
    4973              :             // might be referenced by child branches forever.
    4974              :             // We can track this in child timeline GC and delete parent layers when
    4975              :             // they are no longer needed. This might be complicated with long inheritance chains.
    4976              :             //
    4977              :             // TODO Vec is not a great choice for `retain_lsns`
    4978        11658 :             for retain_lsn in &retain_lsns {
    4979              :                 // start_lsn is inclusive
    4980           10 :                 if &l.get_lsn_range().start <= retain_lsn {
    4981           10 :                     debug!(
    4982            0 :                         "keeping {} because it's still might be referenced by child branch forked at {} is_dropped: xx is_incremental: {}",
    4983            0 :                         l.layer_name(),
    4984            0 :                         retain_lsn,
    4985            0 :                         l.is_incremental(),
    4986              :                     );
    4987           10 :                     result.layers_needed_by_branches += 1;
    4988           10 :                     continue 'outer;
    4989            0 :                 }
    4990              :             }
    4991              : 
    4992              :             // 4. Is there a later on-disk layer for this relation?
    4993              :             //
    4994              :             // The end-LSN is exclusive, while disk_consistent_lsn is
    4995              :             // inclusive. For example, if disk_consistent_lsn is 100, it is
    4996              :             // OK for a delta layer to have end LSN 101, but if the end LSN
    4997              :             // is 102, then it might not have been fully flushed to disk
    4998              :             // before crash.
    4999              :             //
    5000              :             // For example, imagine that the following layers exist:
    5001              :             //
    5002              :             // 1000      - image (A)
    5003              :             // 1000-2000 - delta (B)
    5004              :             // 2000      - image (C)
    5005              :             // 2000-3000 - delta (D)
    5006              :             // 3000      - image (E)
    5007              :             //
    5008              :             // If GC horizon is at 2500, we can remove layers A and B, but
    5009              :             // we cannot remove C, even though it's older than 2500, because
    5010              :             // the delta layer 2000-3000 depends on it.
    5011        11648 :             if !layers
    5012        11648 :                 .image_layer_exists(&l.get_key_range(), &(l.get_lsn_range().end..new_gc_cutoff))
    5013              :             {
    5014        11642 :                 debug!("keeping {} because it is the latest layer", l.layer_name());
    5015        11642 :                 result.layers_not_updated += 1;
    5016        11642 :                 continue 'outer;
    5017            6 :             }
    5018            6 : 
    5019            6 :             // We didn't find any reason to keep this file, so remove it.
    5020            6 :             debug!(
    5021            0 :                 "garbage collecting {} is_dropped: xx is_incremental: {}",
    5022            0 :                 l.layer_name(),
    5023            0 :                 l.is_incremental(),
    5024              :             );
    5025            6 :             layers_to_remove.push(l);
    5026              :         }
    5027              : 
    5028          733 :         if !layers_to_remove.is_empty() {
    5029              :             // Persist the new GC cutoff value before we actually remove anything.
    5030              :             // This unconditionally schedules also an index_part.json update, even though, we will
    5031              :             // be doing one a bit later with the unlinked gc'd layers.
    5032            4 :             let disk_consistent_lsn = self.disk_consistent_lsn.load();
    5033            4 :             self.schedule_uploads(disk_consistent_lsn, None)?;
    5034              : 
    5035            4 :             let gc_layers = layers_to_remove
    5036            4 :                 .iter()
    5037            6 :                 .map(|x| guard.get_from_desc(x))
    5038            4 :                 .collect::<Vec<Layer>>();
    5039            4 : 
    5040            4 :             result.layers_removed = gc_layers.len() as u64;
    5041            4 : 
    5042            4 :             self.remote_client.schedule_gc_update(&gc_layers)?;
    5043              : 
    5044            4 :             guard.finish_gc_timeline(&gc_layers);
    5045            4 : 
    5046            4 :             #[cfg(feature = "testing")]
    5047            4 :             {
    5048            4 :                 result.doomed_layers = gc_layers;
    5049            4 :             }
    5050          729 :         }
    5051              : 
    5052          733 :         info!(
    5053            0 :             "GC completed removing {} layers, cutoff {}",
    5054              :             result.layers_removed, new_gc_cutoff
    5055              :         );
    5056              : 
    5057          733 :         result.elapsed = now.elapsed()?;
    5058          733 :         Ok(result)
    5059          755 :     }
    5060              : 
    5061              :     /// Reconstruct a value, using the given base image and WAL records in 'data'.
    5062       703663 :     async fn reconstruct_value(
    5063       703663 :         &self,
    5064       703663 :         key: Key,
    5065       703663 :         request_lsn: Lsn,
    5066       703663 :         mut data: ValueReconstructState,
    5067       703663 :     ) -> Result<Bytes, PageReconstructError> {
    5068       703663 :         // Perform WAL redo if needed
    5069       703663 :         data.records.reverse();
    5070       703663 : 
    5071       703663 :         // If we have a page image, and no WAL, we're all set
    5072       703663 :         if data.records.is_empty() {
    5073       703651 :             if let Some((img_lsn, img)) = &data.img {
    5074       703651 :                 trace!(
    5075            0 :                     "found page image for key {} at {}, no WAL redo required, req LSN {}",
    5076              :                     key,
    5077              :                     img_lsn,
    5078              :                     request_lsn,
    5079              :                 );
    5080       703651 :                 Ok(img.clone())
    5081              :             } else {
    5082            0 :                 Err(PageReconstructError::from(anyhow!(
    5083            0 :                     "base image for {key} at {request_lsn} not found"
    5084            0 :                 )))
    5085              :             }
    5086              :         } else {
    5087              :             // We need to do WAL redo.
    5088              :             //
    5089              :             // If we don't have a base image, then the oldest WAL record better initialize
    5090              :             // the page
    5091           12 :             if data.img.is_none() && !data.records.first().unwrap().1.will_init() {
    5092            0 :                 Err(PageReconstructError::from(anyhow!(
    5093            0 :                     "Base image for {} at {} not found, but got {} WAL records",
    5094            0 :                     key,
    5095            0 :                     request_lsn,
    5096            0 :                     data.records.len()
    5097            0 :                 )))
    5098              :             } else {
    5099           12 :                 if data.img.is_some() {
    5100           12 :                     trace!(
    5101            0 :                         "found {} WAL records and a base image for {} at {}, performing WAL redo",
    5102            0 :                         data.records.len(),
    5103              :                         key,
    5104              :                         request_lsn
    5105              :                     );
    5106              :                 } else {
    5107            0 :                     trace!("found {} WAL records that will init the page for {} at {}, performing WAL redo", data.records.len(), key, request_lsn);
    5108              :                 };
    5109              : 
    5110           12 :                 let last_rec_lsn = data.records.last().unwrap().0;
    5111              : 
    5112           12 :                 let img = match self
    5113           12 :                     .walredo_mgr
    5114           12 :                     .as_ref()
    5115           12 :                     .context("timeline has no walredo manager")
    5116           12 :                     .map_err(PageReconstructError::WalRedo)?
    5117           12 :                     .request_redo(key, request_lsn, data.img, data.records, self.pg_version)
    5118            0 :                     .await
    5119           12 :                     .context("reconstruct a page image")
    5120              :                 {
    5121           12 :                     Ok(img) => img,
    5122            0 :                     Err(e) => return Err(PageReconstructError::WalRedo(e)),
    5123              :                 };
    5124              : 
    5125           12 :                 if img.len() == page_cache::PAGE_SZ {
    5126            0 :                     let cache = page_cache::get();
    5127            0 :                     if let Err(e) = cache
    5128            0 :                         .memorize_materialized_page(
    5129            0 :                             self.tenant_shard_id,
    5130            0 :                             self.timeline_id,
    5131            0 :                             key,
    5132            0 :                             last_rec_lsn,
    5133            0 :                             &img,
    5134            0 :                         )
    5135            0 :                         .await
    5136            0 :                         .context("Materialized page memoization failed")
    5137              :                     {
    5138            0 :                         return Err(PageReconstructError::from(e));
    5139            0 :                     }
    5140           12 :                 }
    5141              : 
    5142           12 :                 Ok(img)
    5143              :             }
    5144              :         }
    5145       703663 :     }
    5146              : 
    5147            0 :     pub(crate) async fn spawn_download_all_remote_layers(
    5148            0 :         self: Arc<Self>,
    5149            0 :         request: DownloadRemoteLayersTaskSpawnRequest,
    5150            0 :     ) -> Result<DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskInfo> {
    5151            0 :         use pageserver_api::models::DownloadRemoteLayersTaskState;
    5152            0 : 
    5153            0 :         // this is not really needed anymore; it has tests which really check the return value from
    5154            0 :         // http api. it would be better not to maintain this anymore.
    5155            0 : 
    5156            0 :         let mut status_guard = self.download_all_remote_layers_task_info.write().unwrap();
    5157            0 :         if let Some(st) = &*status_guard {
    5158            0 :             match &st.state {
    5159              :                 DownloadRemoteLayersTaskState::Running => {
    5160            0 :                     return Err(st.clone());
    5161              :                 }
    5162              :                 DownloadRemoteLayersTaskState::ShutDown
    5163            0 :                 | DownloadRemoteLayersTaskState::Completed => {
    5164            0 :                     *status_guard = None;
    5165            0 :                 }
    5166              :             }
    5167            0 :         }
    5168              : 
    5169            0 :         let self_clone = Arc::clone(&self);
    5170            0 :         let task_id = task_mgr::spawn(
    5171            0 :             task_mgr::BACKGROUND_RUNTIME.handle(),
    5172            0 :             task_mgr::TaskKind::DownloadAllRemoteLayers,
    5173            0 :             Some(self.tenant_shard_id),
    5174            0 :             Some(self.timeline_id),
    5175            0 :             "download all remote layers task",
    5176              :             false,
    5177            0 :             async move {
    5178            0 :                 self_clone.download_all_remote_layers(request).await;
    5179            0 :                 let mut status_guard = self_clone.download_all_remote_layers_task_info.write().unwrap();
    5180            0 :                  match &mut *status_guard {
    5181              :                     None => {
    5182            0 :                         warn!("tasks status is supposed to be Some(), since we are running");
    5183              :                     }
    5184            0 :                     Some(st) => {
    5185            0 :                         let exp_task_id = format!("{}", task_mgr::current_task_id().unwrap());
    5186            0 :                         if st.task_id != exp_task_id {
    5187            0 :                             warn!("task id changed while we were still running, expecting {} but have {}", exp_task_id, st.task_id);
    5188            0 :                         } else {
    5189            0 :                             st.state = DownloadRemoteLayersTaskState::Completed;
    5190            0 :                         }
    5191              :                     }
    5192              :                 };
    5193            0 :                 Ok(())
    5194            0 :             }
    5195            0 :             .instrument(info_span!(parent: None, "download_all_remote_layers", tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))
    5196              :         );
    5197              : 
    5198            0 :         let initial_info = DownloadRemoteLayersTaskInfo {
    5199            0 :             task_id: format!("{task_id}"),
    5200            0 :             state: DownloadRemoteLayersTaskState::Running,
    5201            0 :             total_layer_count: 0,
    5202            0 :             successful_download_count: 0,
    5203            0 :             failed_download_count: 0,
    5204            0 :         };
    5205            0 :         *status_guard = Some(initial_info.clone());
    5206            0 : 
    5207            0 :         Ok(initial_info)
    5208            0 :     }
    5209              : 
    5210            0 :     async fn download_all_remote_layers(
    5211            0 :         self: &Arc<Self>,
    5212            0 :         request: DownloadRemoteLayersTaskSpawnRequest,
    5213            0 :     ) {
    5214              :         use pageserver_api::models::DownloadRemoteLayersTaskState;
    5215              : 
    5216            0 :         let remaining = {
    5217            0 :             let guard = self.layers.read().await;
    5218            0 :             guard
    5219            0 :                 .layer_map()
    5220            0 :                 .iter_historic_layers()
    5221            0 :                 .map(|desc| guard.get_from_desc(&desc))
    5222            0 :                 .collect::<Vec<_>>()
    5223            0 :         };
    5224            0 :         let total_layer_count = remaining.len();
    5225            0 : 
    5226            0 :         macro_rules! lock_status {
    5227            0 :             ($st:ident) => {
    5228            0 :                 let mut st = self.download_all_remote_layers_task_info.write().unwrap();
    5229            0 :                 let st = st
    5230            0 :                     .as_mut()
    5231            0 :                     .expect("this function is only called after the task has been spawned");
    5232            0 :                 assert_eq!(
    5233            0 :                     st.task_id,
    5234            0 :                     format!(
    5235            0 :                         "{}",
    5236            0 :                         task_mgr::current_task_id().expect("we run inside a task_mgr task")
    5237            0 :                     )
    5238            0 :                 );
    5239            0 :                 let $st = st;
    5240            0 :             };
    5241            0 :         }
    5242            0 : 
    5243            0 :         {
    5244            0 :             lock_status!(st);
    5245            0 :             st.total_layer_count = total_layer_count as u64;
    5246            0 :         }
    5247            0 : 
    5248            0 :         let mut remaining = remaining.into_iter();
    5249            0 :         let mut have_remaining = true;
    5250            0 :         let mut js = tokio::task::JoinSet::new();
    5251            0 : 
    5252            0 :         let cancel = task_mgr::shutdown_token();
    5253            0 : 
    5254            0 :         let limit = request.max_concurrent_downloads;
    5255              : 
    5256              :         loop {
    5257            0 :             while js.len() < limit.get() && have_remaining && !cancel.is_cancelled() {
    5258            0 :                 let Some(next) = remaining.next() else {
    5259            0 :                     have_remaining = false;
    5260            0 :                     break;
    5261              :                 };
    5262              : 
    5263            0 :                 let span = tracing::info_span!("download", layer = %next);
    5264              : 
    5265            0 :                 js.spawn(
    5266            0 :                     async move {
    5267            0 :                         let res = next.download().await;
    5268            0 :                         (next, res)
    5269            0 :                     }
    5270            0 :                     .instrument(span),
    5271            0 :                 );
    5272              :             }
    5273              : 
    5274            0 :             while let Some(res) = js.join_next().await {
    5275            0 :                 match res {
    5276              :                     Ok((_, Ok(_))) => {
    5277            0 :                         lock_status!(st);
    5278            0 :                         st.successful_download_count += 1;
    5279              :                     }
    5280            0 :                     Ok((layer, Err(e))) => {
    5281            0 :                         tracing::error!(%layer, "download failed: {e:#}");
    5282            0 :                         lock_status!(st);
    5283            0 :                         st.failed_download_count += 1;
    5284              :                     }
    5285            0 :                     Err(je) if je.is_cancelled() => unreachable!("not used here"),
    5286            0 :                     Err(je) if je.is_panic() => {
    5287            0 :                         lock_status!(st);
    5288            0 :                         st.failed_download_count += 1;
    5289              :                     }
    5290            0 :                     Err(je) => tracing::warn!("unknown joinerror: {je:?}"),
    5291              :                 }
    5292              :             }
    5293              : 
    5294            0 :             if js.is_empty() && (!have_remaining || cancel.is_cancelled()) {
    5295            0 :                 break;
    5296            0 :             }
    5297              :         }
    5298              : 
    5299              :         {
    5300            0 :             lock_status!(st);
    5301            0 :             st.state = DownloadRemoteLayersTaskState::Completed;
    5302            0 :         }
    5303            0 :     }
    5304              : 
    5305            0 :     pub(crate) fn get_download_all_remote_layers_task_info(
    5306            0 :         &self,
    5307            0 :     ) -> Option<DownloadRemoteLayersTaskInfo> {
    5308            0 :         self.download_all_remote_layers_task_info
    5309            0 :             .read()
    5310            0 :             .unwrap()
    5311            0 :             .clone()
    5312            0 :     }
    5313              : }
    5314              : 
    5315              : impl Timeline {
    5316              :     /// Returns non-remote layers for eviction.
    5317            0 :     pub(crate) async fn get_local_layers_for_disk_usage_eviction(&self) -> DiskUsageEvictionInfo {
    5318            0 :         let guard = self.layers.read().await;
    5319            0 :         let mut max_layer_size: Option<u64> = None;
    5320            0 : 
    5321            0 :         let resident_layers = guard
    5322            0 :             .likely_resident_layers()
    5323            0 :             .map(|layer| {
    5324            0 :                 let file_size = layer.layer_desc().file_size;
    5325            0 :                 max_layer_size = max_layer_size.map_or(Some(file_size), |m| Some(m.max(file_size)));
    5326            0 : 
    5327            0 :                 let last_activity_ts = layer.access_stats().latest_activity_or_now();
    5328            0 : 
    5329            0 :                 EvictionCandidate {
    5330            0 :                     layer: layer.into(),
    5331            0 :                     last_activity_ts,
    5332            0 :                     relative_last_activity: finite_f32::FiniteF32::ZERO,
    5333            0 :                 }
    5334            0 :             })
    5335            0 :             .collect();
    5336            0 : 
    5337            0 :         DiskUsageEvictionInfo {
    5338            0 :             max_layer_size,
    5339            0 :             resident_layers,
    5340            0 :         }
    5341            0 :     }
    5342              : 
    5343         1300 :     pub(crate) fn get_shard_index(&self) -> ShardIndex {
    5344         1300 :         ShardIndex {
    5345         1300 :             shard_number: self.tenant_shard_id.shard_number,
    5346         1300 :             shard_count: self.tenant_shard_id.shard_count,
    5347         1300 :         }
    5348         1300 :     }
    5349              : }
    5350              : 
    5351              : type TraversalPathItem = (ValueReconstructResult, Lsn, TraversalId);
    5352              : 
    5353              : struct TimelineWriterState {
    5354              :     open_layer: Arc<InMemoryLayer>,
    5355              :     current_size: u64,
    5356              :     // Previous Lsn which passed through
    5357              :     prev_lsn: Option<Lsn>,
    5358              :     // Largest Lsn which passed through the current writer
    5359              :     max_lsn: Option<Lsn>,
    5360              :     // Cached details of the last freeze. Avoids going trough the atomic/lock on every put.
    5361              :     cached_last_freeze_at: Lsn,
    5362              : }
    5363              : 
    5364              : impl TimelineWriterState {
    5365      4804100 :     fn new(open_layer: Arc<InMemoryLayer>, current_size: u64, last_freeze_at: Lsn) -> Self {
    5366      4804100 :         Self {
    5367      4804100 :             open_layer,
    5368      4804100 :             current_size,
    5369      4804100 :             prev_lsn: None,
    5370      4804100 :             max_lsn: None,
    5371      4804100 :             cached_last_freeze_at: last_freeze_at,
    5372      4804100 :         }
    5373      4804100 :     }
    5374              : }
    5375              : 
    5376              : /// Various functions to mutate the timeline.
    5377              : // TODO Currently, Deref is used to allow easy access to read methods from this trait.
    5378              : // This is probably considered a bad practice in Rust and should be fixed eventually,
    5379              : // but will cause large code changes.
    5380              : pub(crate) struct TimelineWriter<'a> {
    5381              :     tl: &'a Timeline,
    5382              :     write_guard: tokio::sync::MutexGuard<'a, Option<TimelineWriterState>>,
    5383              : }
    5384              : 
    5385              : impl Deref for TimelineWriter<'_> {
    5386              :     type Target = Timeline;
    5387              : 
    5388      4806832 :     fn deref(&self) -> &Self::Target {
    5389      4806832 :         self.tl
    5390      4806832 :     }
    5391              : }
    5392              : 
    5393              : impl Drop for TimelineWriter<'_> {
    5394      5133116 :     fn drop(&mut self) {
    5395      5133116 :         self.write_guard.take();
    5396      5133116 :     }
    5397              : }
    5398              : 
    5399              : #[derive(PartialEq)]
    5400              : enum OpenLayerAction {
    5401              :     Roll,
    5402              :     Open,
    5403              :     None,
    5404              : }
    5405              : 
    5406              : impl<'a> TimelineWriter<'a> {
    5407              :     /// Put a new page version that can be constructed from a WAL record
    5408              :     ///
    5409              :     /// This will implicitly extend the relation, if the page is beyond the
    5410              :     /// current end-of-file.
    5411      5090196 :     pub(crate) async fn put(
    5412      5090196 :         &mut self,
    5413      5090196 :         key: Key,
    5414      5090196 :         lsn: Lsn,
    5415      5090196 :         value: &Value,
    5416      5090196 :         ctx: &RequestContext,
    5417      5090196 :     ) -> anyhow::Result<()> {
    5418      5090196 :         // Avoid doing allocations for "small" values.
    5419      5090196 :         // In the regression test suite, the limit of 256 avoided allocations in 95% of cases:
    5420      5090196 :         // https://github.com/neondatabase/neon/pull/5056#discussion_r1301975061
    5421      5090196 :         let mut buf = smallvec::SmallVec::<[u8; 256]>::new();
    5422      5090196 :         value.ser_into(&mut buf)?;
    5423      5090196 :         let buf_size: u64 = buf.len().try_into().expect("oversized value buf");
    5424      5090196 : 
    5425      5090196 :         let action = self.get_open_layer_action(lsn, buf_size);
    5426      5090196 :         let layer = self.handle_open_layer_action(lsn, action, ctx).await?;
    5427      5090196 :         let res = layer.put_value(key, lsn, &buf, ctx).await;
    5428              : 
    5429      5090196 :         if res.is_ok() {
    5430      5090196 :             // Update the current size only when the entire write was ok.
    5431      5090196 :             // In case of failures, we may have had partial writes which
    5432      5090196 :             // render the size tracking out of sync. That's ok because
    5433      5090196 :             // the checkpoint distance should be significantly smaller
    5434      5090196 :             // than the S3 single shot upload limit of 5GiB.
    5435      5090196 :             let state = self.write_guard.as_mut().unwrap();
    5436      5090196 : 
    5437      5090196 :             state.current_size += buf_size;
    5438      5090196 :             state.prev_lsn = Some(lsn);
    5439      5090196 :             state.max_lsn = std::cmp::max(state.max_lsn, Some(lsn));
    5440      5090196 :         }
    5441              : 
    5442      5090196 :         res
    5443      5090196 :     }
    5444              : 
    5445      5090198 :     async fn handle_open_layer_action(
    5446      5090198 :         &mut self,
    5447      5090198 :         at: Lsn,
    5448      5090198 :         action: OpenLayerAction,
    5449      5090198 :         ctx: &RequestContext,
    5450      5090198 :     ) -> anyhow::Result<&Arc<InMemoryLayer>> {
    5451      5090198 :         match action {
    5452              :             OpenLayerAction::Roll => {
    5453            0 :                 let freeze_at = self.write_guard.as_ref().unwrap().max_lsn.unwrap();
    5454            0 :                 self.roll_layer(freeze_at).await?;
    5455            0 :                 self.open_layer(at, ctx).await?;
    5456              :             }
    5457      4804100 :             OpenLayerAction::Open => self.open_layer(at, ctx).await?,
    5458              :             OpenLayerAction::None => {
    5459       286098 :                 assert!(self.write_guard.is_some());
    5460              :             }
    5461              :         }
    5462              : 
    5463      5090198 :         Ok(&self.write_guard.as_ref().unwrap().open_layer)
    5464      5090198 :     }
    5465              : 
    5466      4804100 :     async fn open_layer(&mut self, at: Lsn, ctx: &RequestContext) -> anyhow::Result<()> {
    5467      4804100 :         let layer = self.tl.get_layer_for_write(at, ctx).await?;
    5468      4804100 :         let initial_size = layer.size().await?;
    5469              : 
    5470      4804100 :         let last_freeze_at = self.last_freeze_at.load();
    5471      4804100 :         self.write_guard.replace(TimelineWriterState::new(
    5472      4804100 :             layer,
    5473      4804100 :             initial_size,
    5474      4804100 :             last_freeze_at,
    5475      4804100 :         ));
    5476      4804100 : 
    5477      4804100 :         Ok(())
    5478      4804100 :     }
    5479              : 
    5480            0 :     async fn roll_layer(&mut self, freeze_at: Lsn) -> anyhow::Result<()> {
    5481            0 :         assert!(self.write_guard.is_some());
    5482              : 
    5483            0 :         self.tl.freeze_inmem_layer_at(freeze_at).await;
    5484              : 
    5485            0 :         let now = Instant::now();
    5486            0 :         *(self.last_freeze_ts.write().unwrap()) = now;
    5487            0 : 
    5488            0 :         self.tl.flush_frozen_layers();
    5489            0 : 
    5490            0 :         let current_size = self.write_guard.as_ref().unwrap().current_size;
    5491            0 :         if current_size > self.get_checkpoint_distance() {
    5492            0 :             warn!("Flushed oversized open layer with size {}", current_size)
    5493            0 :         }
    5494              : 
    5495            0 :         Ok(())
    5496            0 :     }
    5497              : 
    5498      5090198 :     fn get_open_layer_action(&self, lsn: Lsn, new_value_size: u64) -> OpenLayerAction {
    5499      5090198 :         let state = &*self.write_guard;
    5500      5090198 :         let Some(state) = &state else {
    5501      4804100 :             return OpenLayerAction::Open;
    5502              :         };
    5503              : 
    5504       286098 :         if state.prev_lsn == Some(lsn) {
    5505              :             // Rolling mid LSN is not supported by downstream code.
    5506              :             // Hence, only roll at LSN boundaries.
    5507       286040 :             return OpenLayerAction::None;
    5508           58 :         }
    5509           58 : 
    5510           58 :         if state.current_size == 0 {
    5511              :             // Don't roll empty layers
    5512            0 :             return OpenLayerAction::None;
    5513           58 :         }
    5514           58 : 
    5515           58 :         if self.tl.should_roll(
    5516           58 :             state.current_size,
    5517           58 :             state.current_size + new_value_size,
    5518           58 :             self.get_checkpoint_distance(),
    5519           58 :             lsn,
    5520           58 :             state.cached_last_freeze_at,
    5521           58 :             state.open_layer.get_opened_at(),
    5522           58 :         ) {
    5523            0 :             OpenLayerAction::Roll
    5524              :         } else {
    5525           58 :             OpenLayerAction::None
    5526              :         }
    5527      5090198 :     }
    5528              : 
    5529              :     /// Put a batch of keys at the specified Lsns.
    5530              :     ///
    5531              :     /// The batch is sorted by Lsn (enforced by usage of [`utils::vec_map::VecMap`].
    5532       413994 :     pub(crate) async fn put_batch(
    5533       413994 :         &mut self,
    5534       413994 :         batch: VecMap<Lsn, (Key, Value)>,
    5535       413994 :         ctx: &RequestContext,
    5536       413994 :     ) -> anyhow::Result<()> {
    5537      1114028 :         for (lsn, (key, val)) in batch {
    5538       700034 :             self.put(key, lsn, &val, ctx).await?
    5539              :         }
    5540              : 
    5541       413994 :         Ok(())
    5542       413994 :     }
    5543              : 
    5544            2 :     pub(crate) async fn delete_batch(
    5545            2 :         &mut self,
    5546            2 :         batch: &[(Range<Key>, Lsn)],
    5547            2 :         ctx: &RequestContext,
    5548            2 :     ) -> anyhow::Result<()> {
    5549            2 :         if let Some((_, lsn)) = batch.first() {
    5550            2 :             let action = self.get_open_layer_action(*lsn, 0);
    5551            2 :             let layer = self.handle_open_layer_action(*lsn, action, ctx).await?;
    5552            2 :             layer.put_tombstones(batch).await?;
    5553            0 :         }
    5554              : 
    5555            2 :         Ok(())
    5556            2 :     }
    5557              : 
    5558              :     /// Track the end of the latest digested WAL record.
    5559              :     /// Remember the (end of) last valid WAL record remembered in the timeline.
    5560              :     ///
    5561              :     /// Call this after you have finished writing all the WAL up to 'lsn'.
    5562              :     ///
    5563              :     /// 'lsn' must be aligned. This wakes up any wait_lsn() callers waiting for
    5564              :     /// the 'lsn' or anything older. The previous last record LSN is stored alongside
    5565              :     /// the latest and can be read.
    5566      5279018 :     pub(crate) fn finish_write(&self, new_lsn: Lsn) {
    5567      5279018 :         self.tl.finish_write(new_lsn);
    5568      5279018 :     }
    5569              : 
    5570       270570 :     pub(crate) fn update_current_logical_size(&self, delta: i64) {
    5571       270570 :         self.tl.update_current_logical_size(delta)
    5572       270570 :     }
    5573              : }
    5574              : 
    5575              : // We need TimelineWriter to be send in upcoming conversion of
    5576              : // Timeline::layers to tokio::sync::RwLock.
    5577              : #[test]
    5578            2 : fn is_send() {
    5579            2 :     fn _assert_send<T: Send>() {}
    5580            2 :     _assert_send::<TimelineWriter<'_>>();
    5581            2 : }
    5582              : 
    5583              : /// Add a suffix to a layer file's name: .{num}.old
    5584              : /// Uses the first available num (starts at 0)
    5585            0 : fn rename_to_backup(path: &Utf8Path) -> anyhow::Result<()> {
    5586            0 :     let filename = path
    5587            0 :         .file_name()
    5588            0 :         .ok_or_else(|| anyhow!("Path {path} don't have a file name"))?;
    5589            0 :     let mut new_path = path.to_owned();
    5590              : 
    5591            0 :     for i in 0u32.. {
    5592            0 :         new_path.set_file_name(format!("{filename}.{i}.old"));
    5593            0 :         if !new_path.exists() {
    5594            0 :             std::fs::rename(path, &new_path)
    5595            0 :                 .with_context(|| format!("rename {path:?} to {new_path:?}"))?;
    5596            0 :             return Ok(());
    5597            0 :         }
    5598              :     }
    5599              : 
    5600            0 :     bail!("couldn't find an unused backup number for {:?}", path)
    5601            0 : }
    5602              : 
    5603              : #[cfg(test)]
    5604              : mod tests {
    5605              :     use utils::{id::TimelineId, lsn::Lsn};
    5606              : 
    5607              :     use crate::tenant::{
    5608              :         harness::TenantHarness, storage_layer::Layer, timeline::EvictionError, Timeline,
    5609              :     };
    5610              : 
    5611              :     #[tokio::test]
    5612            2 :     async fn two_layer_eviction_attempts_at_the_same_time() {
    5613            2 :         let harness =
    5614            2 :             TenantHarness::create("two_layer_eviction_attempts_at_the_same_time").unwrap();
    5615            2 : 
    5616            8 :         let (tenant, ctx) = harness.load().await;
    5617            2 :         let timeline = tenant
    5618            2 :             .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx)
    5619            6 :             .await
    5620            2 :             .unwrap();
    5621            2 : 
    5622            2 :         let layer = find_some_layer(&timeline).await;
    5623            2 :         let layer = layer
    5624            2 :             .keep_resident()
    5625            2 :             .await
    5626            2 :             .expect("no download => no downloading errors")
    5627            2 :             .drop_eviction_guard();
    5628            2 : 
    5629            2 :         let forever = std::time::Duration::from_secs(120);
    5630            2 : 
    5631            2 :         let first = layer.evict_and_wait(forever);
    5632            2 :         let second = layer.evict_and_wait(forever);
    5633            2 : 
    5634            2 :         let (first, second) = tokio::join!(first, second);
    5635            2 : 
    5636            2 :         let res = layer.keep_resident().await;
    5637            2 :         assert!(res.is_none(), "{res:?}");
    5638            2 : 
    5639            2 :         match (first, second) {
    5640            2 :             (Ok(()), Ok(())) => {
    5641            2 :                 // because there are no more timeline locks being taken on eviction path, we can
    5642            2 :                 // witness all three outcomes here.
    5643            2 :             }
    5644            2 :             (Ok(()), Err(EvictionError::NotFound)) | (Err(EvictionError::NotFound), Ok(())) => {
    5645            0 :                 // if one completes before the other, this is fine just as well.
    5646            0 :             }
    5647            2 :             other => unreachable!("unexpected {:?}", other),
    5648            2 :         }
    5649            2 :     }
    5650              : 
    5651            2 :     async fn find_some_layer(timeline: &Timeline) -> Layer {
    5652            2 :         let layers = timeline.layers.read().await;
    5653            2 :         let desc = layers
    5654            2 :             .layer_map()
    5655            2 :             .iter_historic_layers()
    5656            2 :             .next()
    5657            2 :             .expect("must find one layer to evict");
    5658            2 : 
    5659            2 :         layers.get_from_desc(&desc)
    5660            2 :     }
    5661              : }
        

Generated by: LCOV version 2.1-beta