LCOV - code coverage report

Current view: top level - pageserver/src/tenant - timeline.rs (source / functions)
Test:      d3863ebe0efe476e2f785dedc32e86b3a6fbc249.info
Test Date: 2024-11-19 16:58:45

             Coverage   Total    Hit
Lines:        63.8 %     3439   2193
Functions:    58.4 %      327    191

            Line data    Source code
       1              : pub(crate) mod analysis;
       2              : pub(crate) mod compaction;
       3              : pub mod delete;
       4              : pub(crate) mod detach_ancestor;
       5              : mod eviction_task;
       6              : pub(crate) mod handle;
       7              : mod init;
       8              : pub mod layer_manager;
       9              : pub(crate) mod logical_size;
      10              : pub mod offload;
      11              : pub mod span;
      12              : pub mod uninit;
      13              : mod walreceiver;
      14              : 
      15              : use anyhow::{anyhow, bail, ensure, Context, Result};
      16              : use arc_swap::ArcSwap;
      17              : use bytes::Bytes;
      18              : use camino::Utf8Path;
      19              : use chrono::{DateTime, Utc};
      20              : use enumset::EnumSet;
      21              : use fail::fail_point;
      22              : use handle::ShardTimelineId;
      23              : use offload::OffloadError;
      24              : use once_cell::sync::Lazy;
      25              : use pageserver_api::{
      26              :     config::tenant_conf_defaults::DEFAULT_COMPACTION_THRESHOLD,
      27              :     key::{
      28              :         KEY_SIZE, METADATA_KEY_BEGIN_PREFIX, METADATA_KEY_END_PREFIX, NON_INHERITED_RANGE,
      29              :         NON_INHERITED_SPARSE_RANGE,
      30              :     },
      31              :     keyspace::{KeySpaceAccum, KeySpaceRandomAccum, SparseKeyPartitioning},
      32              :     models::{
      33              :         CompactionAlgorithm, CompactionAlgorithmSettings, DownloadRemoteLayersTaskInfo,
      34              :         DownloadRemoteLayersTaskSpawnRequest, EvictionPolicy, InMemoryLayerInfo, LayerMapInfo,
      35              :         LsnLease, TimelineState,
      36              :     },
      37              :     reltag::BlockNumber,
      38              :     shard::{ShardIdentity, ShardNumber, TenantShardId},
      39              : };
      40              : use rand::Rng;
      41              : use serde_with::serde_as;
      42              : use storage_broker::BrokerClientChannel;
      43              : use tokio::{
      44              :     runtime::Handle,
      45              :     sync::{oneshot, watch},
      46              : };
      47              : use tokio_util::sync::CancellationToken;
      48              : use tracing::*;
      49              : use utils::{
      50              :     fs_ext, pausable_failpoint,
      51              :     sync::gate::{Gate, GateGuard},
      52              : };
      53              : use wal_decoder::serialized_batch::SerializedValueBatch;
      54              : 
      55              : use std::sync::atomic::Ordering as AtomicOrdering;
      56              : use std::sync::{Arc, Mutex, RwLock, Weak};
      57              : use std::time::{Duration, Instant, SystemTime};
      58              : use std::{
      59              :     array,
      60              :     collections::{BTreeMap, HashMap, HashSet},
      61              :     sync::atomic::AtomicU64,
      62              : };
      63              : use std::{cmp::min, ops::ControlFlow};
      64              : use std::{
      65              :     collections::btree_map::Entry,
      66              :     ops::{Deref, Range},
      67              : };
      68              : use std::{pin::pin, sync::OnceLock};
      69              : 
      70              : use crate::{
      71              :     aux_file::AuxFileSizeEstimator,
      72              :     tenant::{
      73              :         config::AttachmentMode,
      74              :         layer_map::{LayerMap, SearchResult},
      75              :         metadata::TimelineMetadata,
      76              :         storage_layer::{inmemory_layer::IndexEntry, PersistentLayerDesc},
      77              :     },
      78              :     walingest::WalLagCooldown,
      79              :     walredo,
      80              : };
      81              : use crate::{
      82              :     context::{DownloadBehavior, RequestContext},
      83              :     disk_usage_eviction_task::DiskUsageEvictionInfo,
      84              :     pgdatadir_mapping::CollectKeySpaceError,
      85              : };
      86              : use crate::{
      87              :     disk_usage_eviction_task::finite_f32,
      88              :     tenant::storage_layer::{
      89              :         AsLayerDesc, DeltaLayerWriter, EvictionError, ImageLayerWriter, InMemoryLayer, Layer,
      90              :         LayerAccessStatsReset, LayerName, ResidentLayer, ValueReconstructState,
      91              :         ValuesReconstructState,
      92              :     },
      93              : };
      94              : use crate::{
      95              :     disk_usage_eviction_task::EvictionCandidate, tenant::storage_layer::delta_layer::DeltaEntry,
      96              : };
      97              : use crate::{
      98              :     l0_flush::{self, L0FlushGlobalState},
      99              :     metrics::GetKind,
     100              : };
     101              : use crate::{
     102              :     metrics::ScanLatencyOngoingRecording, tenant::timeline::logical_size::CurrentLogicalSize,
     103              : };
     104              : use crate::{
     105              :     pgdatadir_mapping::DirectoryKind,
     106              :     virtual_file::{MaybeFatalIo, VirtualFile},
     107              : };
     108              : use crate::{pgdatadir_mapping::LsnForTimestamp, tenant::tasks::BackgroundLoopKind};
     109              : use crate::{pgdatadir_mapping::MAX_AUX_FILE_V2_DELTAS, tenant::storage_layer::PersistentLayerKey};
     110              : use pageserver_api::config::tenant_conf_defaults::DEFAULT_PITR_INTERVAL;
     111              : 
     112              : use crate::config::PageServerConf;
     113              : use crate::keyspace::{KeyPartitioning, KeySpace};
     114              : use crate::metrics::TimelineMetrics;
     115              : use crate::pgdatadir_mapping::CalculateLogicalSizeError;
     116              : use crate::tenant::config::TenantConfOpt;
     117              : use pageserver_api::reltag::RelTag;
     118              : use pageserver_api::shard::ShardIndex;
     119              : 
     120              : use postgres_connection::PgConnectionConfig;
     121              : use postgres_ffi::{to_pg_timestamp, v14::xlog_utils, WAL_SEGMENT_SIZE};
     122              : use utils::{
     123              :     completion,
     124              :     generation::Generation,
     125              :     id::TimelineId,
     126              :     lsn::{AtomicLsn, Lsn, RecordLsn},
     127              :     seqwait::SeqWait,
     128              :     simple_rcu::{Rcu, RcuReadGuard},
     129              : };
     130              : 
     131              : use crate::task_mgr;
     132              : use crate::task_mgr::TaskKind;
     133              : use crate::tenant::gc_result::GcResult;
     134              : use crate::ZERO_PAGE;
     135              : use pageserver_api::key::Key;
     136              : 
     137              : use self::delete::DeleteTimelineFlow;
     138              : pub(super) use self::eviction_task::EvictionTaskTenantState;
     139              : use self::eviction_task::EvictionTaskTimelineState;
     140              : use self::layer_manager::LayerManager;
     141              : use self::logical_size::LogicalSize;
     142              : use self::walreceiver::{WalReceiver, WalReceiverConf};
     143              : 
     144              : use super::{
     145              :     config::TenantConf, storage_layer::LayerVisibilityHint, upload_queue::NotInitialized,
     146              :     MaybeOffloaded,
     147              : };
     148              : use super::{debug_assert_current_span_has_tenant_and_timeline_id, AttachedTenantConf};
     149              : use super::{remote_timeline_client::index::IndexPart, storage_layer::LayerFringe};
     150              : use super::{
     151              :     remote_timeline_client::RemoteTimelineClient, remote_timeline_client::WaitCompletionError,
     152              :     storage_layer::ReadableLayer,
     153              : };
     154              : use super::{
     155              :     secondary::heatmap::{HeatMapLayer, HeatMapTimeline},
     156              :     GcError,
     157              : };
     158              : 
     159              : #[cfg(test)]
     160              : use pageserver_api::value::Value;
     161              : 
     162              : #[derive(Debug, PartialEq, Eq, Clone, Copy)]
     163              : pub(crate) enum FlushLoopState {
     164              :     NotStarted,
     165              :     Running {
     166              :         #[cfg(test)]
     167              :         expect_initdb_optimization: bool,
     168              :         #[cfg(test)]
     169              :         initdb_optimization_count: usize,
     170              :     },
     171              :     Exited,
     172              : }
     173              : 
     174              : #[derive(Debug, Copy, Clone, PartialEq, Eq)]
     175              : pub enum ImageLayerCreationMode {
     176              :     /// Try to create image layers based on `time_for_new_image_layer`. Used in compaction code path.
     177              :     Try,
     178              :     /// Force creating the image layers if possible. For now, no image layers will be created
     179              :     /// for metadata keys. Used in compaction code path with force flag enabled.
     180              :     Force,
     181              :     /// Initial ingestion of the data, and no data should be dropped in this function. This
     182              :     /// means that no metadata keys should be included in the partitions. Used in flush frozen layer
     183              :     /// code path.
     184              :     Initial,
     185              : }
     186              : 
     187              : impl std::fmt::Display for ImageLayerCreationMode {
     188          716 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
     189          716 :         write!(f, "{:?}", self)
     190          716 :     }
     191              : }
     192              : 
      193              : /// Temporary function for the immutable storage state refactor: ensures we are dropping the lock guard itself, not something else.
     194              : /// Can be removed after all refactors are done.
     195           28 : fn drop_rlock<T>(rlock: tokio::sync::RwLockReadGuard<T>) {
     196           28 :     drop(rlock)
     197           28 : }
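
A minimal sketch of why these typed helpers exist (illustrative, not part of this file): a bare `drop(&guard)` compiles but drops only a shared reference, leaving the lock held, whereas the typed helper consumes the guard by value and therefore provably releases the lock.

async fn drop_rlock_sketch(lock: &tokio::sync::RwLock<u32>) {
    let guard = lock.read().await;
    // drop(&guard); // compiles, but drops only a reference; the lock stays held
    drop_rlock(guard); // consumes the guard, releasing the read lock here
}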
     198              : 
      199              : /// Temporary function for the immutable storage state refactor: ensures we are dropping the lock guard itself, not something else.
     200              : /// Can be removed after all refactors are done.
      201          744 : fn drop_wlock<T>(wlock: tokio::sync::RwLockWriteGuard<'_, T>) {
      202          744 :     drop(wlock)
     203          744 : }
     204              : 
     205              : /// The outward-facing resources required to build a Timeline
     206              : pub struct TimelineResources {
     207              :     pub remote_client: RemoteTimelineClient,
     208              :     pub timeline_get_throttle:
     209              :         Arc<crate::tenant::throttle::Throttle<crate::metrics::tenant_throttling::TimelineGet>>,
     210              :     pub l0_flush_global_state: l0_flush::L0FlushGlobalState,
     211              : }
     212              : 
     213              : /// The relation size cache caches relation sizes at the end of the timeline. It speeds up WAL
      214              : /// ingestion considerably, because WAL ingestion needs to check, for most records, whether the record
     215              : /// implicitly extends the relation.  At startup, `complete_as_of` is initialized to the current end
     216              : /// of the timeline (disk_consistent_lsn).  It's used on reads of relation sizes to check if the
     217              : /// value can be used to also update the cache, see [`Timeline::update_cached_rel_size`].
     218              : pub(crate) struct RelSizeCache {
     219              :     pub(crate) complete_as_of: Lsn,
     220              :     pub(crate) map: HashMap<RelTag, (Lsn, BlockNumber)>,
     221              : }
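
A hedged sketch of the update rule described above (`maybe_cache_rel_size` is a hypothetical helper, not the real `Timeline::update_cached_rel_size`): a relation size observed at `lsn` may be written into the cache only if the cache is complete up to that LSN, and only if it is at least as new as the cached entry.

fn maybe_cache_rel_size(cache: &mut RelSizeCache, tag: RelTag, lsn: Lsn, nblocks: BlockNumber) {
    if lsn < cache.complete_as_of {
        return; // below the completeness horizon, the observation may be stale
    }
    let slot = cache.map.entry(tag).or_insert((lsn, nblocks));
    if slot.0 <= lsn {
        *slot = (lsn, nblocks); // keep the newest observation per relation
    }
}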
     222              : 
     223              : pub struct Timeline {
     224              :     pub(crate) conf: &'static PageServerConf,
     225              :     tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
     226              : 
     227              :     myself: Weak<Self>,
     228              : 
     229              :     pub(crate) tenant_shard_id: TenantShardId,
     230              :     pub timeline_id: TimelineId,
     231              : 
     232              :     /// The generation of the tenant that instantiated us: this is used for safety when writing remote objects.
     233              :     /// Never changes for the lifetime of this [`Timeline`] object.
     234              :     ///
     235              :     /// This duplicates the generation stored in LocationConf, but that structure is mutable:
      236              :     /// this copy enforces the invariant that generation doesn't change during a Tenant's lifetime.
     237              :     pub(crate) generation: Generation,
     238              : 
     239              :     /// The detailed sharding information from our parent Tenant.  This enables us to map keys
     240              :     /// to shards, and is constant through the lifetime of this Timeline.
     241              :     shard_identity: ShardIdentity,
     242              : 
     243              :     pub pg_version: u32,
     244              : 
     245              :     /// The tuple has two elements.
     246              :     /// 1. `LayerFileManager` keeps track of the various physical representations of the layer files (inmem, local, remote).
     247              :     /// 2. `LayerMap`, the acceleration data structure for `get_reconstruct_data`.
     248              :     ///
     249              :     /// `LayerMap` maps out the `(PAGE,LSN) / (KEY,LSN)` space, which is composed of `(KeyRange, LsnRange)` rectangles.
     250              :     /// We describe these rectangles through the `PersistentLayerDesc` struct.
     251              :     ///
     252              :     /// When we want to reconstruct a page, we first find the `PersistentLayerDesc`'s that we need for page reconstruction,
     253              :     /// using `LayerMap`. Then, we use `LayerFileManager` to get the `PersistentLayer`'s that correspond to the
     254              :     /// `PersistentLayerDesc`'s.
     255              :     ///
     256              :     /// Hence, it's important to keep things coherent. The `LayerFileManager` must always have an entry for all
     257              :     /// `PersistentLayerDesc`'s in the `LayerMap`. If it doesn't, `LayerFileManager::get_from_desc` will panic at
     258              :     /// runtime, e.g., during page reconstruction.
     259              :     ///
     260              :     /// In the future, we'll be able to split up the tuple of LayerMap and `LayerFileManager`,
     261              :     /// so that e.g. on-demand-download/eviction, and layer spreading, can operate just on `LayerFileManager`.
     262              :     pub(crate) layers: tokio::sync::RwLock<LayerManager>,
     263              : 
     264              :     last_freeze_at: AtomicLsn,
     265              :     // Atomic would be more appropriate here.
     266              :     last_freeze_ts: RwLock<Instant>,
     267              : 
     268              :     pub(crate) standby_horizon: AtomicLsn,
     269              : 
     270              :     // WAL redo manager. `None` only for broken tenants.
     271              :     walredo_mgr: Option<Arc<super::WalRedoManager>>,
     272              : 
     273              :     /// Remote storage client.
     274              :     /// See [`remote_timeline_client`](super::remote_timeline_client) module comment for details.
     275              :     pub remote_client: Arc<RemoteTimelineClient>,
     276              : 
     277              :     // What page versions do we hold in the repository? If we get a
     278              :     // request > last_record_lsn, we need to wait until we receive all
     279              :     // the WAL up to the request. The SeqWait provides functions for
     280              :     // that. TODO: If we get a request for an old LSN, such that the
     281              :     // versions have already been garbage collected away, we should
     282              :     // throw an error, but we don't track that currently.
     283              :     //
     284              :     // last_record_lsn.load().last points to the end of last processed WAL record.
     285              :     //
     286              :     // We also remember the starting point of the previous record in
     287              :     // 'last_record_lsn.load().prev'. It's used to set the xl_prev pointer of the
     288              :     // first WAL record when the node is started up. But here, we just
     289              :     // keep track of it.
     290              :     last_record_lsn: SeqWait<RecordLsn, Lsn>,
     291              : 
     292              :     // All WAL records have been processed and stored durably on files on
     293              :     // local disk, up to this LSN. On crash and restart, we need to re-process
     294              :     // the WAL starting from this point.
     295              :     //
     296              :     // Some later WAL records might have been processed and also flushed to disk
     297              :     // already, so don't be surprised to see some, but there's no guarantee on
     298              :     // them yet.
     299              :     disk_consistent_lsn: AtomicLsn,
     300              : 
     301              :     // Parent timeline that this timeline was branched from, and the LSN
     302              :     // of the branch point.
     303              :     ancestor_timeline: Option<Arc<Timeline>>,
     304              :     ancestor_lsn: Lsn,
     305              : 
     306              :     pub(super) metrics: TimelineMetrics,
     307              : 
     308              :     // `Timeline` doesn't write these metrics itself, but it manages the lifetime.  Code
     309              :     // in `crate::page_service` writes these metrics.
     310              :     pub(crate) query_metrics: crate::metrics::SmgrQueryTimePerTimeline,
     311              : 
     312              :     directory_metrics: [AtomicU64; DirectoryKind::KINDS_NUM],
     313              : 
     314              :     /// Ensures layers aren't frozen by checkpointer between
     315              :     /// [`Timeline::get_layer_for_write`] and layer reads.
     316              :     /// Locked automatically by [`TimelineWriter`] and checkpointer.
     317              :     /// Must always be acquired before the layer map/individual layer lock
     318              :     /// to avoid deadlock.
     319              :     ///
     320              :     /// The state is cleared upon freezing.
     321              :     write_lock: tokio::sync::Mutex<Option<TimelineWriterState>>,
     322              : 
     323              :     /// Used to avoid multiple `flush_loop` tasks running
     324              :     pub(super) flush_loop_state: Mutex<FlushLoopState>,
     325              : 
     326              :     /// layer_flush_start_tx can be used to wake up the layer-flushing task.
     327              :     /// - The u64 value is a counter, incremented every time a new flush cycle is requested.
     328              :     ///   The flush cycle counter is sent back on the layer_flush_done channel when
     329              :     ///   the flush finishes. You can use that to wait for the flush to finish.
     330              :     /// - The LSN is updated to max() of its current value and the latest disk_consistent_lsn
      331              :     ///   read by whoever sends an update (a simplified request/wait sketch follows this struct)
     332              :     layer_flush_start_tx: tokio::sync::watch::Sender<(u64, Lsn)>,
     333              :     /// to be notified when layer flushing has finished, subscribe to the layer_flush_done channel
     334              :     layer_flush_done_tx: tokio::sync::watch::Sender<(u64, Result<(), FlushLayerError>)>,
     335              : 
     336              :     // Needed to ensure that we can't create a branch at a point that was already garbage collected
     337              :     pub latest_gc_cutoff_lsn: Rcu<Lsn>,
     338              : 
     339              :     // List of child timelines and their branch points. This is needed to avoid
     340              :     // garbage collecting data that is still needed by the child timelines.
     341              :     pub(crate) gc_info: std::sync::RwLock<GcInfo>,
     342              : 
      343              :     // It may change across major versions, so for simplicity
      344              :     // we keep it after running initdb for a timeline.
      345              :     // It is needed in checks when we want to error on some operations
      346              :     // when they are requested for a pre-initdb LSN.
     347              :     // It can be unified with latest_gc_cutoff_lsn under some "first_valid_lsn",
     348              :     // though let's keep them both for better error visibility.
     349              :     pub initdb_lsn: Lsn,
     350              : 
      351              :     /// When did we last calculate the partitioning? Made pub(super) so test cases can access it.
     352              :     pub(super) partitioning: tokio::sync::Mutex<((KeyPartitioning, SparseKeyPartitioning), Lsn)>,
     353              : 
     354              :     /// Configuration: how often should the partitioning be recalculated.
     355              :     repartition_threshold: u64,
     356              : 
     357              :     last_image_layer_creation_check_at: AtomicLsn,
     358              :     last_image_layer_creation_check_instant: std::sync::Mutex<Option<Instant>>,
     359              : 
     360              :     /// Current logical size of the "datadir", at the last LSN.
     361              :     current_logical_size: LogicalSize,
     362              : 
     363              :     /// Information about the last processed message by the WAL receiver,
      364              :     /// or None if the WAL receiver has not received anything for this timeline
     365              :     /// yet.
     366              :     pub last_received_wal: Mutex<Option<WalReceiverInfo>>,
     367              :     pub walreceiver: Mutex<Option<WalReceiver>>,
     368              : 
     369              :     /// Relation size cache
     370              :     pub(crate) rel_size_cache: RwLock<RelSizeCache>,
     371              : 
     372              :     download_all_remote_layers_task_info: RwLock<Option<DownloadRemoteLayersTaskInfo>>,
     373              : 
     374              :     state: watch::Sender<TimelineState>,
     375              : 
     376              :     /// Prevent two tasks from deleting the timeline at the same time. If held, the
     377              :     /// timeline is being deleted. If 'true', the timeline has already been deleted.
     378              :     pub delete_progress: TimelineDeleteProgress,
     379              : 
     380              :     eviction_task_timeline_state: tokio::sync::Mutex<EvictionTaskTimelineState>,
     381              : 
     382              :     /// Load or creation time information about the disk_consistent_lsn and when the loading
     383              :     /// happened. Used for consumption metrics.
     384              :     pub(crate) loaded_at: (Lsn, SystemTime),
     385              : 
     386              :     /// Gate to prevent shutdown completing while I/O is still happening to this timeline's data
     387              :     pub(crate) gate: Gate,
     388              : 
     389              :     /// Cancellation token scoped to this timeline: anything doing long-running work relating
     390              :     /// to the timeline should drop out when this token fires.
     391              :     pub(crate) cancel: CancellationToken,
     392              : 
     393              :     /// Make sure we only have one running compaction at a time in tests.
     394              :     ///
     395              :     /// Must only be taken in two places:
     396              :     /// - [`Timeline::compact`] (this file)
     397              :     /// - [`delete::delete_local_timeline_directory`]
     398              :     ///
     399              :     /// Timeline deletion will acquire both compaction and gc locks in whatever order.
     400              :     compaction_lock: tokio::sync::Mutex<()>,
     401              : 
     402              :     /// Make sure we only have one running gc at a time.
     403              :     ///
     404              :     /// Must only be taken in two places:
     405              :     /// - [`Timeline::gc`] (this file)
     406              :     /// - [`delete::delete_local_timeline_directory`]
     407              :     ///
     408              :     /// Timeline deletion will acquire both compaction and gc locks in whatever order.
     409              :     gc_lock: tokio::sync::Mutex<()>,
     410              : 
     411              :     /// Cloned from [`super::Tenant::timeline_get_throttle`] on construction.
     412              :     timeline_get_throttle:
     413              :         Arc<crate::tenant::throttle::Throttle<crate::metrics::tenant_throttling::TimelineGet>>,
     414              : 
     415              :     /// Size estimator for aux file v2
     416              :     pub(crate) aux_file_size_estimator: AuxFileSizeEstimator,
     417              : 
     418              :     /// Some test cases directly place keys into the timeline without actually modifying the directory
     419              :     /// keys (i.e., DB_DIR). The test cases creating such keys will put the keyspaces here, so that
     420              :     /// these keys won't get garbage-collected during compaction/GC. This field only modifies the dense
     421              :     /// keyspace return value of `collect_keyspace`. For sparse keyspaces, use AUX keys for testing, and
     422              :     /// in the future, add `extra_test_sparse_keyspace` if necessary.
     423              :     #[cfg(test)]
     424              :     pub(crate) extra_test_dense_keyspace: ArcSwap<KeySpace>,
     425              : 
     426              :     pub(crate) l0_flush_global_state: L0FlushGlobalState,
     427              : 
     428              :     pub(crate) handles: handle::PerTimelineState<crate::page_service::TenantManagerTypes>,
     429              : 
     430              :     pub(crate) attach_wal_lag_cooldown: Arc<OnceLock<WalLagCooldown>>,
     431              : 
     432              :     /// Cf. [`crate::tenant::CreateTimelineIdempotency`].
     433              :     pub(crate) create_idempotency: crate::tenant::CreateTimelineIdempotency,
     434              : }
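
A simplified sketch of the flush-notification protocol that the `layer_flush_start_tx` / `layer_flush_done_tx` comments above describe (the real wiring lives in the flush loop; this is illustrative only): bump the request counter and raise the LSN watermark, then wait until the done channel echoes a counter at least as large.

async fn wait_for_flush_sketch(
    start_tx: &tokio::sync::watch::Sender<(u64, Lsn)>,
    mut done_rx: tokio::sync::watch::Receiver<(u64, Result<(), FlushLayerError>)>,
    disk_consistent_lsn: Lsn,
) -> Result<(), FlushLayerError> {
    let mut my_flush = 0;
    start_tx.send_modify(|(counter, lsn)| {
        *counter += 1;
        my_flush = *counter;
        // max() semantics from the comment: never move the watermark backwards.
        *lsn = std::cmp::max(*lsn, disk_consistent_lsn);
    });
    loop {
        {
            let latest = done_rx.borrow_and_update();
            if latest.0 >= my_flush {
                return latest.1.clone();
            }
        }
        if done_rx.changed().await.is_err() {
            // Sender dropped: the flush loop exited, treat it as cancellation.
            return Err(FlushLayerError::Cancelled);
        }
    }
}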
     435              : 
     436              : pub type TimelineDeleteProgress = Arc<tokio::sync::Mutex<DeleteTimelineFlow>>;
     437              : 
     438              : pub struct WalReceiverInfo {
     439              :     pub wal_source_connconf: PgConnectionConfig,
     440              :     pub last_received_msg_lsn: Lsn,
     441              :     pub last_received_msg_ts: u128,
     442              : }
     443              : 
     444              : /// Information about how much history needs to be retained, needed by
     445              : /// Garbage Collection.
     446              : #[derive(Default)]
     447              : pub(crate) struct GcInfo {
     448              :     /// Specific LSNs that are needed.
     449              :     ///
     450              :     /// Currently, this includes all points where child branches have
     451              :     /// been forked off from. In the future, could also include
     452              :     /// explicit user-defined snapshot points.
     453              :     pub(crate) retain_lsns: Vec<(Lsn, TimelineId, MaybeOffloaded)>,
     454              : 
     455              :     /// The cutoff coordinates, which are combined by selecting the minimum.
     456              :     pub(crate) cutoffs: GcCutoffs,
     457              : 
     458              :     /// Leases granted to particular LSNs.
     459              :     pub(crate) leases: BTreeMap<Lsn, LsnLease>,
     460              : 
     461              :     /// Whether our branch point is within our ancestor's PITR interval (for cost estimation)
     462              :     pub(crate) within_ancestor_pitr: bool,
     463              : }
     464              : 
     465              : impl GcInfo {
     466          228 :     pub(crate) fn min_cutoff(&self) -> Lsn {
     467          228 :         self.cutoffs.select_min()
     468          228 :     }
     469              : 
     470          232 :     pub(super) fn insert_child(
     471          232 :         &mut self,
     472          232 :         child_id: TimelineId,
     473          232 :         child_lsn: Lsn,
     474          232 :         is_offloaded: MaybeOffloaded,
     475          232 :     ) {
     476          232 :         self.retain_lsns.push((child_lsn, child_id, is_offloaded));
     477          232 :         self.retain_lsns.sort_by_key(|i| i.0);
     478          232 :     }
     479              : 
     480            4 :     pub(super) fn remove_child_maybe_offloaded(
     481            4 :         &mut self,
     482            4 :         child_id: TimelineId,
     483            4 :         maybe_offloaded: MaybeOffloaded,
     484            4 :     ) -> bool {
      485            4 :         // Remove at most one element. Needed for correctness if there are two live `Timeline` objects referencing
      486            4 :         // the same timeline. That shouldn't happen, but can occur when `Arc`s live longer than intended.
     487            4 :         let mut removed = false;
     488            6 :         self.retain_lsns.retain(|i| {
     489            6 :             if removed {
     490            2 :                 return true;
     491            4 :             }
     492            4 :             let remove = i.1 == child_id && i.2 == maybe_offloaded;
     493            4 :             removed |= remove;
     494            4 :             !remove
     495            6 :         });
     496            4 :         removed
     497            4 :     }
     498              : 
     499            4 :     pub(super) fn remove_child_not_offloaded(&mut self, child_id: TimelineId) -> bool {
     500            4 :         self.remove_child_maybe_offloaded(child_id, MaybeOffloaded::No)
     501            4 :     }
     502              : 
     503            0 :     pub(super) fn remove_child_offloaded(&mut self, child_id: TimelineId) -> bool {
     504            0 :         self.remove_child_maybe_offloaded(child_id, MaybeOffloaded::Yes)
     505            0 :     }
     506              : }
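
A hypothetical test sketching the "remove at most one" guarantee documented in `remove_child_maybe_offloaded` (assumes `TimelineId::generate()` from `utils::id`; not a test from this file):

#[test]
fn remove_child_removes_at_most_one_sketch() {
    let mut info = GcInfo::default();
    let child = TimelineId::generate();
    // The same child registered twice, e.g. by two live `Timeline` objects.
    info.insert_child(child, Lsn(10), MaybeOffloaded::No);
    info.insert_child(child, Lsn(20), MaybeOffloaded::No);
    assert!(info.remove_child_not_offloaded(child));
    // Exactly one entry survives: removal deliberately stops after one match.
    assert_eq!(info.retain_lsns.len(), 1);
}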
     507              : 
     508              : /// The `GcInfo` component describing which Lsns need to be retained.  Functionally, this
     509              : /// is a single number (the oldest LSN which we must retain), but it internally distinguishes
     510              : /// between time-based and space-based retention for observability and consumption metrics purposes.
     511              : #[derive(Debug, Clone)]
     512              : pub(crate) struct GcCutoffs {
     513              :     /// Calculated from the [`TenantConf::gc_horizon`], this LSN indicates how much
     514              :     /// history we must keep to retain a specified number of bytes of WAL.
     515              :     pub(crate) space: Lsn,
     516              : 
     517              :     /// Calculated from [`TenantConf::pitr_interval`], this LSN indicates how much
     518              :     /// history we must keep to enable reading back at least the PITR interval duration.
     519              :     pub(crate) time: Lsn,
     520              : }
     521              : 
     522              : impl Default for GcCutoffs {
     523          418 :     fn default() -> Self {
     524          418 :         Self {
     525          418 :             space: Lsn::INVALID,
     526          418 :             time: Lsn::INVALID,
     527          418 :         }
     528          418 :     }
     529              : }
     530              : 
     531              : impl GcCutoffs {
     532          268 :     fn select_min(&self) -> Lsn {
     533          268 :         std::cmp::min(self.space, self.time)
     534          268 :     }
     535              : }
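
An illustrative example of how the two cutoffs combine (hypothetical test, not from this file): GC must retain history back to the older of the two cutoffs, hence the minimum.

#[test]
fn gc_cutoffs_select_min_sketch() {
    let cutoffs = GcCutoffs {
        space: Lsn(0x30), // retain at least gc_horizon bytes of WAL
        time: Lsn(0x20),  // retain at least pitr_interval of history
    };
    // The effective cutoff must satisfy both policies.
    assert_eq!(cutoffs.select_min(), Lsn(0x20));
}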
     536              : 
     537              : pub(crate) struct TimelineVisitOutcome {
     538              :     completed_keyspace: KeySpace,
     539              :     image_covered_keyspace: KeySpace,
     540              : }
     541              : 
     542              : /// An error happened in a get() operation.
     543            2 : #[derive(thiserror::Error, Debug)]
     544              : pub(crate) enum PageReconstructError {
     545              :     #[error(transparent)]
     546              :     Other(anyhow::Error),
     547              : 
     548              :     #[error("Ancestor LSN wait error: {0}")]
     549              :     AncestorLsnTimeout(WaitLsnError),
     550              : 
     551              :     #[error("timeline shutting down")]
     552              :     Cancelled,
     553              : 
     554              :     /// An error happened replaying WAL records
     555              :     #[error(transparent)]
     556              :     WalRedo(anyhow::Error),
     557              : 
     558              :     #[error("{0}")]
     559              :     MissingKey(MissingKeyError),
     560              : }
     561              : 
     562              : impl From<anyhow::Error> for PageReconstructError {
     563            0 :     fn from(value: anyhow::Error) -> Self {
     564            0 :         // with walingest.rs many PageReconstructError are wrapped in as anyhow::Error
     565            0 :         match value.downcast::<PageReconstructError>() {
     566            0 :             Ok(pre) => pre,
     567            0 :             Err(other) => PageReconstructError::Other(other),
     568              :         }
     569            0 :     }
     570              : }
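
A hypothetical round-trip illustrating the downcast above: a `PageReconstructError` that was wrapped into an `anyhow::Error` (as happens in walingest.rs) keeps its identity when converted back, instead of being re-wrapped as `Other`.

#[test]
fn page_reconstruct_error_roundtrip_sketch() {
    let wrapped = anyhow::Error::new(PageReconstructError::Cancelled);
    let back = PageReconstructError::from(wrapped);
    // Without the downcast, this would come back as Other(_) and lose the variant.
    assert!(matches!(back, PageReconstructError::Cancelled));
}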
     571              : 
     572              : impl From<utils::bin_ser::DeserializeError> for PageReconstructError {
     573            0 :     fn from(value: utils::bin_ser::DeserializeError) -> Self {
     574            0 :         PageReconstructError::Other(anyhow::Error::new(value).context("deserialization failure"))
     575            0 :     }
     576              : }
     577              : 
     578              : impl From<layer_manager::Shutdown> for PageReconstructError {
     579            0 :     fn from(_: layer_manager::Shutdown) -> Self {
     580            0 :         PageReconstructError::Cancelled
     581            0 :     }
     582              : }
     583              : 
     584              : impl GetVectoredError {
     585              :     #[cfg(test)]
     586            6 :     pub(crate) fn is_missing_key_error(&self) -> bool {
     587            6 :         matches!(self, Self::MissingKey(_))
     588            6 :     }
     589              : }
     590              : 
     591              : impl From<layer_manager::Shutdown> for GetVectoredError {
     592            0 :     fn from(_: layer_manager::Shutdown) -> Self {
     593            0 :         GetVectoredError::Cancelled
     594            0 :     }
     595              : }
     596              : 
     597              : #[derive(thiserror::Error)]
     598              : pub struct MissingKeyError {
     599              :     key: Key,
     600              :     shard: ShardNumber,
     601              :     cont_lsn: Lsn,
     602              :     request_lsn: Lsn,
     603              :     ancestor_lsn: Option<Lsn>,
     604              :     backtrace: Option<std::backtrace::Backtrace>,
     605              : }
     606              : 
     607              : impl std::fmt::Debug for MissingKeyError {
     608            0 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
     609            0 :         write!(f, "{}", self)
     610            0 :     }
     611              : }
     612              : 
     613              : impl std::fmt::Display for MissingKeyError {
     614            0 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
     615            0 :         write!(
     616            0 :             f,
     617            0 :             "could not find data for key {} (shard {:?}) at LSN {}, request LSN {}",
     618            0 :             self.key, self.shard, self.cont_lsn, self.request_lsn
     619            0 :         )?;
     620            0 :         if let Some(ref ancestor_lsn) = self.ancestor_lsn {
     621            0 :             write!(f, ", ancestor {}", ancestor_lsn)?;
     622            0 :         }
     623              : 
     624            0 :         if let Some(ref backtrace) = self.backtrace {
     625            0 :             write!(f, "\n{}", backtrace)?;
     626            0 :         }
     627              : 
     628            0 :         Ok(())
     629            0 :     }
     630              : }
     631              : 
     632              : impl PageReconstructError {
     633              :     /// Returns true if this error indicates a tenant/timeline shutdown alike situation
     634            0 :     pub(crate) fn is_stopping(&self) -> bool {
     635              :         use PageReconstructError::*;
     636            0 :         match self {
     637            0 :             Cancelled => true,
     638            0 :             Other(_) | AncestorLsnTimeout(_) | WalRedo(_) | MissingKey(_) => false,
     639              :         }
     640            0 :     }
     641              : }
     642              : 
     643            0 : #[derive(thiserror::Error, Debug)]
     644              : pub(crate) enum CreateImageLayersError {
     645              :     #[error("timeline shutting down")]
     646              :     Cancelled,
     647              : 
     648              :     #[error("read failed")]
     649              :     GetVectoredError(#[source] GetVectoredError),
     650              : 
     651              :     #[error("reconstruction failed")]
     652              :     PageReconstructError(#[source] PageReconstructError),
     653              : 
     654              :     #[error(transparent)]
     655              :     Other(#[from] anyhow::Error),
     656              : }
     657              : 
     658              : impl From<layer_manager::Shutdown> for CreateImageLayersError {
     659            0 :     fn from(_: layer_manager::Shutdown) -> Self {
     660            0 :         CreateImageLayersError::Cancelled
     661            0 :     }
     662              : }
     663              : 
     664            0 : #[derive(thiserror::Error, Debug, Clone)]
     665              : pub(crate) enum FlushLayerError {
     666              :     /// Timeline cancellation token was cancelled
     667              :     #[error("timeline shutting down")]
     668              :     Cancelled,
     669              : 
     670              :     /// We tried to flush a layer while the Timeline is in an unexpected state
     671              :     #[error("cannot flush frozen layers when flush_loop is not running, state is {0:?}")]
     672              :     NotRunning(FlushLoopState),
     673              : 
      674              :     // We wrap the following non-cloneable error types in Arc<>: this enum must be Clone because the flush error is propagated from the flush
     675              :     // loop via a watch channel, where we can only borrow it.
     676              :     #[error("create image layers (shared)")]
     677              :     CreateImageLayersError(Arc<CreateImageLayersError>),
     678              : 
     679              :     #[error("other (shared)")]
     680              :     Other(#[from] Arc<anyhow::Error>),
     681              : }
     682              : 
     683              : impl FlushLayerError {
     684              :     // When crossing from generic anyhow errors to this error type, we explicitly check
     685              :     // for timeline cancellation to avoid logging inoffensive shutdown errors as warn/err.
     686            0 :     fn from_anyhow(timeline: &Timeline, err: anyhow::Error) -> Self {
     687            0 :         let cancelled = timeline.cancel.is_cancelled()
     688              :             // The upload queue might have been shut down before the official cancellation of the timeline.
     689            0 :             || err
     690            0 :                 .downcast_ref::<NotInitialized>()
     691            0 :                 .map(NotInitialized::is_stopping)
     692            0 :                 .unwrap_or_default();
     693            0 :         if cancelled {
     694            0 :             Self::Cancelled
     695              :         } else {
     696            0 :             Self::Other(Arc::new(err))
     697              :         }
     698            0 :     }
     699              : }
     700              : 
     701              : impl From<layer_manager::Shutdown> for FlushLayerError {
     702            0 :     fn from(_: layer_manager::Shutdown) -> Self {
     703            0 :         FlushLayerError::Cancelled
     704            0 :     }
     705              : }
     706              : 
     707            0 : #[derive(thiserror::Error, Debug)]
     708              : pub(crate) enum GetVectoredError {
     709              :     #[error("timeline shutting down")]
     710              :     Cancelled,
     711              : 
     712              :     #[error("requested too many keys: {0} > {}", Timeline::MAX_GET_VECTORED_KEYS)]
     713              :     Oversized(u64),
     714              : 
     715              :     #[error("requested at invalid LSN: {0}")]
     716              :     InvalidLsn(Lsn),
     717              : 
     718              :     #[error("requested key not found: {0}")]
     719              :     MissingKey(MissingKeyError),
     720              : 
     721              :     #[error("ancestry walk")]
     722              :     GetReadyAncestorError(#[source] GetReadyAncestorError),
     723              : 
     724              :     #[error(transparent)]
     725              :     Other(#[from] anyhow::Error),
     726              : }
     727              : 
     728              : impl From<GetReadyAncestorError> for GetVectoredError {
     729            2 :     fn from(value: GetReadyAncestorError) -> Self {
     730              :         use GetReadyAncestorError::*;
     731            2 :         match value {
     732            0 :             Cancelled => GetVectoredError::Cancelled,
     733              :             AncestorLsnTimeout(_) | BadState { .. } => {
     734            2 :                 GetVectoredError::GetReadyAncestorError(value)
     735              :             }
     736              :         }
     737            2 :     }
     738              : }
     739              : 
     740            2 : #[derive(thiserror::Error, Debug)]
     741              : pub(crate) enum GetReadyAncestorError {
     742              :     #[error("ancestor LSN wait error")]
     743              :     AncestorLsnTimeout(#[from] WaitLsnError),
     744              : 
     745              :     #[error("bad state on timeline {timeline_id}: {state:?}")]
     746              :     BadState {
     747              :         timeline_id: TimelineId,
     748              :         state: TimelineState,
     749              :     },
     750              : 
     751              :     #[error("cancelled")]
     752              :     Cancelled,
     753              : }
     754              : 
     755              : #[derive(Clone, Copy)]
     756              : pub enum LogicalSizeCalculationCause {
     757              :     Initial,
     758              :     ConsumptionMetricsSyntheticSize,
     759              :     EvictionTaskImitation,
     760              :     TenantSizeHandler,
     761              : }
     762              : 
     763              : pub enum GetLogicalSizePriority {
     764              :     User,
     765              :     Background,
     766              : }
     767              : 
     768            0 : #[derive(enumset::EnumSetType)]
     769              : pub(crate) enum CompactFlags {
     770              :     ForceRepartition,
     771              :     ForceImageLayerCreation,
     772              :     ForceL0Compaction,
     773              :     EnhancedGcBottomMostCompaction,
     774              :     DryRun,
     775              : }
     776              : 
     777              : impl std::fmt::Debug for Timeline {
     778            0 :     fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
     779            0 :         write!(f, "Timeline<{}>", self.timeline_id)
     780            0 :     }
     781              : }
     782              : 
     783            0 : #[derive(thiserror::Error, Debug)]
     784              : pub(crate) enum WaitLsnError {
     785              :     // Called on a timeline which is shutting down
     786              :     #[error("Shutdown")]
     787              :     Shutdown,
     788              : 
      789              :     // Called on a timeline that is not in an active state or is shutting down
     790              :     #[error("Bad timeline state: {0:?}")]
     791              :     BadState(TimelineState),
     792              : 
     793              :     // Timeout expired while waiting for LSN to catch up with goal.
     794              :     #[error("{0}")]
     795              :     Timeout(String),
     796              : }
     797              : 
     798              : // The impls below achieve cancellation mapping for errors.
     799              : // Perhaps there's a way of achieving this with less cruft.
     800              : 
     801              : impl From<CreateImageLayersError> for CompactionError {
     802            0 :     fn from(e: CreateImageLayersError) -> Self {
     803            0 :         match e {
     804            0 :             CreateImageLayersError::Cancelled => CompactionError::ShuttingDown,
     805            0 :             CreateImageLayersError::Other(e) => {
     806            0 :                 CompactionError::Other(e.context("create image layers"))
     807              :             }
     808            0 :             _ => CompactionError::Other(e.into()),
     809              :         }
     810            0 :     }
     811              : }
     812              : 
     813              : impl From<CreateImageLayersError> for FlushLayerError {
     814            0 :     fn from(e: CreateImageLayersError) -> Self {
     815            0 :         match e {
     816            0 :             CreateImageLayersError::Cancelled => FlushLayerError::Cancelled,
     817            0 :             any => FlushLayerError::CreateImageLayersError(Arc::new(any)),
     818              :         }
     819            0 :     }
     820              : }
     821              : 
     822              : impl From<PageReconstructError> for CreateImageLayersError {
     823            0 :     fn from(e: PageReconstructError) -> Self {
     824            0 :         match e {
     825            0 :             PageReconstructError::Cancelled => CreateImageLayersError::Cancelled,
     826            0 :             _ => CreateImageLayersError::PageReconstructError(e),
     827              :         }
     828            0 :     }
     829              : }
     830              : 
     831              : impl From<GetVectoredError> for CreateImageLayersError {
     832            0 :     fn from(e: GetVectoredError) -> Self {
     833            0 :         match e {
     834            0 :             GetVectoredError::Cancelled => CreateImageLayersError::Cancelled,
     835            0 :             _ => CreateImageLayersError::GetVectoredError(e),
     836              :         }
     837            0 :     }
     838              : }
     839              : 
     840              : impl From<GetVectoredError> for PageReconstructError {
     841            6 :     fn from(e: GetVectoredError) -> Self {
     842            6 :         match e {
     843            0 :             GetVectoredError::Cancelled => PageReconstructError::Cancelled,
     844            0 :             GetVectoredError::InvalidLsn(_) => PageReconstructError::Other(anyhow!("Invalid LSN")),
     845            0 :             err @ GetVectoredError::Oversized(_) => PageReconstructError::Other(err.into()),
     846            4 :             GetVectoredError::MissingKey(err) => PageReconstructError::MissingKey(err),
     847            2 :             GetVectoredError::GetReadyAncestorError(err) => PageReconstructError::from(err),
     848            0 :             GetVectoredError::Other(err) => PageReconstructError::Other(err),
     849              :         }
     850            6 :     }
     851              : }
     852              : 
     853              : impl From<GetReadyAncestorError> for PageReconstructError {
     854            2 :     fn from(e: GetReadyAncestorError) -> Self {
     855              :         use GetReadyAncestorError::*;
     856            2 :         match e {
     857            0 :             AncestorLsnTimeout(wait_err) => PageReconstructError::AncestorLsnTimeout(wait_err),
     858            2 :             bad_state @ BadState { .. } => PageReconstructError::Other(anyhow::anyhow!(bad_state)),
     859            0 :             Cancelled => PageReconstructError::Cancelled,
     860              :         }
     861            2 :     }
     862              : }
     863              : 
     864              : pub(crate) enum WaitLsnWaiter<'a> {
     865              :     Timeline(&'a Timeline),
     866              :     Tenant,
     867              :     PageService,
     868              : }
     869              : 
     870              : /// Argument to [`Timeline::shutdown`].
     871              : #[derive(Debug, Clone, Copy)]
     872              : pub(crate) enum ShutdownMode {
     873              :     /// Graceful shutdown, may do a lot of I/O as we flush any open layers to disk and then
     874              :     /// also to remote storage.  This method can easily take multiple seconds for a busy timeline.
     875              :     ///
     876              :     /// While we are flushing, we continue to accept read I/O for LSNs ingested before
     877              :     /// the call to [`Timeline::shutdown`].
     878              :     FreezeAndFlush,
     879              :     /// Only flush the layers to the remote storage without freezing any open layers. This is the
      880              :     /// mode used by ancestor detach and any other operation that reloads a tenant without increasing
     881              :     /// the generation number.
     882              :     Flush,
     883              :     /// Shut down immediately, without waiting for any open layers to flush.
     884              :     Hard,
     885              : }
     886              : 
     887              : struct ImageLayerCreationOutcome {
     888              :     image: Option<ResidentLayer>,
     889              :     next_start_key: Key,
     890              : }
     891              : 
     892              : /// Public interface functions
     893              : impl Timeline {
     894              :     /// Get the LSN where this branch was created
     895            4 :     pub(crate) fn get_ancestor_lsn(&self) -> Lsn {
     896            4 :         self.ancestor_lsn
     897            4 :     }
     898              : 
     899              :     /// Get the ancestor's timeline id
     900           12 :     pub(crate) fn get_ancestor_timeline_id(&self) -> Option<TimelineId> {
     901           12 :         self.ancestor_timeline
     902           12 :             .as_ref()
     903           12 :             .map(|ancestor| ancestor.timeline_id)
     904           12 :     }
     905              : 
     906              :     /// Get the ancestor timeline
     907            2 :     pub(crate) fn ancestor_timeline(&self) -> Option<&Arc<Timeline>> {
     908            2 :         self.ancestor_timeline.as_ref()
     909            2 :     }
     910              : 
     911              :     /// Get the bytes written since the PITR cutoff on this branch, and
     912              :     /// whether this branch's ancestor_lsn is within its parent's PITR.
     913            0 :     pub(crate) fn get_pitr_history_stats(&self) -> (u64, bool) {
     914            0 :         let gc_info = self.gc_info.read().unwrap();
     915            0 :         let history = self
     916            0 :             .get_last_record_lsn()
     917            0 :             .checked_sub(gc_info.cutoffs.time)
     918            0 :             .unwrap_or(Lsn(0))
     919            0 :             .0;
     920            0 :         (history, gc_info.within_ancestor_pitr)
     921            0 :     }
     922              : 
     923              :     /// Lock and get timeline's GC cutoff
     924          248 :     pub(crate) fn get_latest_gc_cutoff_lsn(&self) -> RcuReadGuard<Lsn> {
     925          248 :         self.latest_gc_cutoff_lsn.read()
     926          248 :     }
     927              : 
     928              :     /// Look up given page version.
     929              :     ///
     930              :     /// If a remote layer file is needed, it is downloaded as part of this
     931              :     /// call.
     932              :     ///
     933              :     /// This method enforces [`Self::timeline_get_throttle`] internally.
     934              :     ///
     935              :     /// NOTE: It is considered an error to 'get' a key that doesn't exist. The
     936              :     /// abstraction above this needs to store suitable metadata to track what
     937              :     /// data exists with what keys, in separate metadata entries. If a
     938              :     /// non-existent key is requested, we may incorrectly return a value from
     939              :     /// an ancestor branch, for example, or waste a lot of cycles chasing the
     940              :     /// non-existing key.
     941              :     ///
     942              :     /// # Cancel-Safety
     943              :     ///
     944              :     /// This method is cancellation-safe.
     945              :     #[inline(always)]
     946       606728 :     pub(crate) async fn get(
     947       606728 :         &self,
     948       606728 :         key: Key,
     949       606728 :         lsn: Lsn,
     950       606728 :         ctx: &RequestContext,
     951       606728 :     ) -> Result<Bytes, PageReconstructError> {
     952       606728 :         if !lsn.is_valid() {
     953            0 :             return Err(PageReconstructError::Other(anyhow::anyhow!("Invalid LSN")));
     954       606728 :         }
     955       606728 : 
     956       606728 :         // This check is debug-only because of the cost of hashing, and because it's a double-check: we
     957       606728 :         // already checked the key against the shard_identity when looking up the Timeline from
     958       606728 :         // page_service.
     959       606728 :         debug_assert!(!self.shard_identity.is_key_disposable(&key));
     960              : 
     961       606728 :         self.timeline_get_throttle.throttle(ctx, 1).await;
     962              : 
     963       606728 :         let keyspace = KeySpace {
     964       606728 :             ranges: vec![key..key.next()],
     965       606728 :         };
     966       606728 : 
      967       606728 :         // Initialise the reconstruct state for the key.
     969       606728 :         let mut reconstruct_state = ValuesReconstructState::new();
     970              : 
     971       606728 :         let vectored_res = self
     972       606728 :             .get_vectored_impl(keyspace.clone(), lsn, &mut reconstruct_state, ctx)
     973       183045 :             .await;
     974              : 
     975       606728 :         let key_value = vectored_res?.pop_first();
     976       606722 :         match key_value {
     977       606710 :             Some((got_key, value)) => {
     978       606710 :                 if got_key != key {
     979            0 :                     error!(
     980            0 :                         "Expected {}, but singular vectored get returned {}",
     981              :                         key, got_key
     982              :                     );
     983            0 :                     Err(PageReconstructError::Other(anyhow!(
     984            0 :                         "Singular vectored get returned wrong key"
     985            0 :                     )))
     986              :                 } else {
     987       606710 :                     value
     988              :                 }
     989              :             }
     990           12 :             None => Err(PageReconstructError::MissingKey(MissingKeyError {
     991           12 :                 key,
     992           12 :                 shard: self.shard_identity.get_shard_number(&key),
     993           12 :                 cont_lsn: Lsn(0),
     994           12 :                 request_lsn: lsn,
     995           12 :                 ancestor_lsn: None,
     996           12 :                 backtrace: None,
     997           12 :             })),
     998              :         }
     999       606728 :     }
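                       :     // Editor's note: a minimal caller sketch (illustrative only, not part of
                       :     // this file). It assumes `lsn` has already been ingested (see `wait_lsn`
                       :     // below) and that `key` belongs to this shard.
                       :     //
                       :     // async fn read_page(
                       :     //     timeline: &Timeline,
                       :     //     key: Key,
                       :     //     lsn: Lsn,
                       :     //     ctx: &RequestContext,
                       :     // ) -> Result<Bytes, PageReconstructError> {
                       :     //     // Downloads remote layers on demand; cancellation-safe per the doc above.
                       :     //     timeline.get(key, lsn, ctx).await
                       :     // }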
    1000              : 
    1001              :     pub(crate) const MAX_GET_VECTORED_KEYS: u64 = 32;
    1002              :     pub(crate) const VEC_GET_LAYERS_VISITED_WARN_THRESH: f64 = 512.0;
    1003              : 
    1004              :     /// Look up multiple page versions at a given LSN
    1005              :     ///
    1006              :     /// This naive implementation will be replaced with a more efficient one
    1007              :     /// which actually vectorizes the read path.
    1008        19528 :     pub(crate) async fn get_vectored(
    1009        19528 :         &self,
    1010        19528 :         keyspace: KeySpace,
    1011        19528 :         lsn: Lsn,
    1012        19528 :         ctx: &RequestContext,
    1013        19528 :     ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
    1014        19528 :         if !lsn.is_valid() {
    1015            0 :             return Err(GetVectoredError::InvalidLsn(lsn));
    1016        19528 :         }
    1017        19528 : 
    1018        19528 :         let key_count = keyspace.total_raw_size().try_into().unwrap();
    1019        19528 :         if key_count > Timeline::MAX_GET_VECTORED_KEYS {
    1020            0 :             return Err(GetVectoredError::Oversized(key_count));
    1021        19528 :         }
    1022              : 
    1023        39056 :         for range in &keyspace.ranges {
    1024        19528 :             let mut key = range.start;
    1025        39262 :             while key != range.end {
    1026        19734 :                 assert!(!self.shard_identity.is_key_disposable(&key));
    1027        19734 :                 key = key.next();
    1028              :             }
    1029              :         }
    1030              : 
    1031        19528 :         trace!(
    1032            0 :             "get vectored request for {:?}@{} from task kind {:?}",
    1033            0 :             keyspace,
    1034            0 :             lsn,
    1035            0 :             ctx.task_kind(),
    1036              :         );
    1037              : 
    1038        19528 :         let start = crate::metrics::GET_VECTORED_LATENCY
    1039        19528 :             .for_task_kind(ctx.task_kind())
    1040        19528 :             .map(|metric| (metric, Instant::now()));
    1041              : 
    1042              :         // start counting after throttle so that throttle time
    1043              :         // is always less than observation time
    1044        19528 :         let throttled = self
    1045        19528 :             .timeline_get_throttle
    1046        19528 :             .throttle(ctx, key_count as usize)
    1047            0 :             .await;
    1048              : 
    1049        19528 :         let res = self
    1050        19528 :             .get_vectored_impl(
    1051        19528 :                 keyspace.clone(),
    1052        19528 :                 lsn,
    1053        19528 :                 &mut ValuesReconstructState::new(),
    1054        19528 :                 ctx,
    1055        19528 :             )
    1056         7038 :             .await;
    1057              : 
    1058        19528 :         if let Some((metric, start)) = start {
    1059            0 :             let elapsed = start.elapsed();
    1060            0 :             let ex_throttled = if let Some(throttled) = throttled {
    1061            0 :                 elapsed.checked_sub(throttled)
    1062              :             } else {
    1063            0 :                 Some(elapsed)
    1064              :             };
    1065              : 
    1066            0 :             if let Some(ex_throttled) = ex_throttled {
    1067            0 :                 metric.observe(ex_throttled.as_secs_f64());
    1068            0 :             } else {
    1069            0 :                 use utils::rate_limit::RateLimit;
    1070            0 :                 static LOGGED: Lazy<Mutex<RateLimit>> =
    1071            0 :                     Lazy::new(|| Mutex::new(RateLimit::new(Duration::from_secs(10))));
    1072            0 :                 let mut rate_limit = LOGGED.lock().unwrap();
    1073            0 :                 rate_limit.call(|| {
    1074            0 :                     warn!("error deducting time spent throttled; this message is logged at a global rate limit");
    1075            0 :                 });
    1076            0 :             }
    1077        19528 :         }
    1078              : 
    1079        19528 :         res
    1080        19528 :     }
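                       :     // Editor's note: an illustrative caller (not part of this file) that stays
                       :     // within the MAX_GET_VECTORED_KEYS limit; the helper name is hypothetical.
                       :     //
                       :     // async fn read_contiguous_batch(
                       :     //     timeline: &Timeline,
                       :     //     start: Key,
                       :     //     lsn: Lsn,
                       :     //     ctx: &RequestContext,
                       :     // ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
                       :     //     // Build a keyspace of exactly MAX_GET_VECTORED_KEYS contiguous keys;
                       :     //     // anything larger is rejected with GetVectoredError::Oversized.
                       :     //     let mut end = start;
                       :     //     for _ in 0..Timeline::MAX_GET_VECTORED_KEYS {
                       :     //         end = end.next();
                       :     //     }
                       :     //     timeline
                       :     //         .get_vectored(KeySpace { ranges: vec![start..end] }, lsn, ctx)
                       :     //         .await
                       :     // }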
    1081              : 
     1082              :     /// Scan the keyspace and return all existing key-value pairs in it. This is currently built on
     1083              :     /// top of vectored get. A normal vectored get returns an error when a key in the keyspace is
     1084              :     /// not found during the search, but the scan interface returns all existing key-value pairs and
     1085              :     /// does not expect every key in the keyspace to be found. The semantics are closer to the
     1086              :     /// RocksDB scan iterator interface. We could later optimize this interface to skip some of the
     1087              :     /// checks in the vectored get path that maintain and split the probed and to-be-probed
     1088              :     /// keyspaces. We also need to ensure that the scan operation will not cause OOM in the future.
    1089           12 :     pub(crate) async fn scan(
    1090           12 :         &self,
    1091           12 :         keyspace: KeySpace,
    1092           12 :         lsn: Lsn,
    1093           12 :         ctx: &RequestContext,
    1094           12 :     ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
    1095           12 :         if !lsn.is_valid() {
    1096            0 :             return Err(GetVectoredError::InvalidLsn(lsn));
    1097           12 :         }
    1098           12 : 
    1099           12 :         trace!(
    1100            0 :             "key-value scan request for {:?}@{} from task kind {:?}",
    1101            0 :             keyspace,
    1102            0 :             lsn,
    1103            0 :             ctx.task_kind()
    1104              :         );
    1105              : 
    1106              :         // We should generalize this into Keyspace::contains in the future.
    1107           24 :         for range in &keyspace.ranges {
    1108           12 :             if range.start.field1 < METADATA_KEY_BEGIN_PREFIX
    1109           12 :                 || range.end.field1 > METADATA_KEY_END_PREFIX
    1110              :             {
    1111            0 :                 return Err(GetVectoredError::Other(anyhow::anyhow!(
    1112            0 :                     "only metadata keyspace can be scanned"
    1113            0 :                 )));
    1114           12 :             }
    1115              :         }
    1116              : 
    1117           12 :         let start = crate::metrics::SCAN_LATENCY
    1118           12 :             .for_task_kind(ctx.task_kind())
    1119           12 :             .map(ScanLatencyOngoingRecording::start_recording);
    1120              : 
    1121              :         // start counting after throttle so that throttle time
    1122              :         // is always less than observation time
    1123           12 :         let throttled = self
    1124           12 :             .timeline_get_throttle
    1125           12 :             // assume scan = 1 quota for now until we find a better way to process this
    1126           12 :             .throttle(ctx, 1)
    1127            0 :             .await;
    1128              : 
    1129           12 :         let vectored_res = self
    1130           12 :             .get_vectored_impl(
    1131           12 :                 keyspace.clone(),
    1132           12 :                 lsn,
    1133           12 :                 &mut ValuesReconstructState::default(),
    1134           12 :                 ctx,
    1135           12 :             )
    1136            0 :             .await;
    1137              : 
    1138           12 :         if let Some(recording) = start {
    1139            0 :             recording.observe(throttled);
    1140           12 :         }
    1141              : 
    1142           12 :         vectored_res
    1143           12 :     }
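                       :     // Editor's note: an illustrative caller (not part of this file). `scan`
                       :     // only accepts ranges within the metadata keyspace, and missing keys are
                       :     // omitted from the result rather than reported as MissingKey.
                       :     //
                       :     // async fn scan_metadata_range(
                       :     //     timeline: &Timeline,
                       :     //     // must lie within METADATA_KEY_BEGIN_PREFIX..METADATA_KEY_END_PREFIX
                       :     //     range: Range<Key>,
                       :     //     lsn: Lsn,
                       :     //     ctx: &RequestContext,
                       :     // ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
                       :     //     timeline.scan(KeySpace { ranges: vec![range] }, lsn, ctx).await
                       :     // }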
    1144              : 
    1145       626570 :     pub(super) async fn get_vectored_impl(
    1146       626570 :         &self,
    1147       626570 :         keyspace: KeySpace,
    1148       626570 :         lsn: Lsn,
    1149       626570 :         reconstruct_state: &mut ValuesReconstructState,
    1150       626570 :         ctx: &RequestContext,
    1151       626570 :     ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
    1152       626570 :         let get_kind = if keyspace.total_raw_size() == 1 {
    1153       626134 :             GetKind::Singular
    1154              :         } else {
    1155          436 :             GetKind::Vectored
    1156              :         };
    1157              : 
    1158       626570 :         let get_data_timer = crate::metrics::GET_RECONSTRUCT_DATA_TIME
    1159       626570 :             .for_get_kind(get_kind)
    1160       626570 :             .start_timer();
    1161       626570 :         self.get_vectored_reconstruct_data(keyspace.clone(), lsn, reconstruct_state, ctx)
    1162       192204 :             .await?;
    1163       626554 :         get_data_timer.stop_and_record();
    1164       626554 : 
    1165       626554 :         let reconstruct_timer = crate::metrics::RECONSTRUCT_TIME
    1166       626554 :             .for_get_kind(get_kind)
    1167       626554 :             .start_timer();
    1168       626554 :         let mut results: BTreeMap<Key, Result<Bytes, PageReconstructError>> = BTreeMap::new();
    1169       626554 :         let layers_visited = reconstruct_state.get_layers_visited();
    1170              : 
    1171       666880 :         for (key, res) in std::mem::take(&mut reconstruct_state.keys) {
    1172       666880 :             match res {
    1173            0 :                 Err(err) => {
    1174            0 :                     results.insert(key, Err(err));
    1175            0 :                 }
    1176       666880 :                 Ok(state) => {
    1177       666880 :                     let state = ValueReconstructState::from(state);
    1178              : 
    1179       666880 :                     let reconstruct_res = self.reconstruct_value(key, lsn, state).await;
    1180       666880 :                     results.insert(key, reconstruct_res);
    1181              :                 }
    1182              :             }
    1183              :         }
    1184       626554 :         reconstruct_timer.stop_and_record();
    1185       626554 : 
    1186       626554 :         // For aux file keys (v1 or v2) the vectored read path does not return an error
    1187       626554 :         // when they're missing. Instead they are omitted from the resulting btree
    1188       626554 :         // (this is a requirement, not a bug). Skip updating the metric in these cases
     1189       626554 :     // to avoid a division by zero yielding an infinite average.
    1190       626554 :         if !results.is_empty() {
    1191       626348 :             let avg = layers_visited as f64 / results.len() as f64;
    1192       626348 :             if avg >= Self::VEC_GET_LAYERS_VISITED_WARN_THRESH {
    1193            0 :                 use utils::rate_limit::RateLimit;
    1194            0 :                 static LOGGED: Lazy<Mutex<RateLimit>> =
    1195            0 :                     Lazy::new(|| Mutex::new(RateLimit::new(Duration::from_secs(60))));
    1196            0 :                 let mut rate_limit = LOGGED.lock().unwrap();
    1197            0 :                 rate_limit.call(|| {
    1198            0 :                     tracing::info!(
    1199            0 :                       shard_id = %self.tenant_shard_id.shard_slug(),
    1200            0 :                       lsn = %lsn,
    1201            0 :                       "Vectored read for {} visited {} layers on average per key and {} in total. {}/{} pages were returned",
    1202            0 :                       keyspace, avg, layers_visited, results.len(), keyspace.total_raw_size());
    1203            0 :                 });
    1204       626348 :             }
    1205              : 
    1206              :             // Note that this is an approximation. Tracking the exact number of layers visited
    1207              :             // per key requires virtually unbounded memory usage and is inefficient
    1208              :             // (i.e. segment tree tracking each range queried from a layer)
    1209       626348 :             crate::metrics::VEC_READ_NUM_LAYERS_VISITED.observe(avg);
    1210          206 :         }
    1211              : 
    1212       626554 :         Ok(results)
    1213       626570 :     }
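                       :     // Editor's note: an illustrative reading of the metric above (not part of
                       :     // this file). If a vectored get visits 1024 layers in total and returns 2
                       :     // pages, avg = 1024 / 2 = 512.0, which reaches
                       :     // VEC_GET_LAYERS_VISITED_WARN_THRESH and fires the rate-limited log line
                       :     // (at most once per 60s, process-wide).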
    1214              : 
    1215              :     /// Get last or prev record separately. Same as get_last_record_rlsn().last/prev.
    1216       274290 :     pub(crate) fn get_last_record_lsn(&self) -> Lsn {
    1217       274290 :         self.last_record_lsn.load().last
    1218       274290 :     }
    1219              : 
    1220            0 :     pub(crate) fn get_prev_record_lsn(&self) -> Lsn {
    1221            0 :         self.last_record_lsn.load().prev
    1222            0 :     }
    1223              : 
    1224              :     /// Atomically get both last and prev.
    1225          228 :     pub(crate) fn get_last_record_rlsn(&self) -> RecordLsn {
    1226          228 :         self.last_record_lsn.load()
    1227          228 :     }
    1228              : 
    1229              :     /// Subscribe to callers of wait_lsn(). The value of the channel is None if there are no
    1230              :     /// wait_lsn() calls in progress, and Some(Lsn) if there is an active waiter for wait_lsn().
    1231            0 :     pub(crate) fn subscribe_for_wait_lsn_updates(&self) -> watch::Receiver<Option<Lsn>> {
    1232            0 :         self.last_record_lsn.status_receiver()
    1233            0 :     }
    1234              : 
    1235          414 :     pub(crate) fn get_disk_consistent_lsn(&self) -> Lsn {
    1236          414 :         self.disk_consistent_lsn.load()
    1237          414 :     }
    1238              : 
    1239              :     /// remote_consistent_lsn from the perspective of the tenant's current generation,
    1240              :     /// not validated with control plane yet.
    1241              :     /// See [`Self::get_remote_consistent_lsn_visible`].
    1242            0 :     pub(crate) fn get_remote_consistent_lsn_projected(&self) -> Option<Lsn> {
    1243            0 :         self.remote_client.remote_consistent_lsn_projected()
    1244            0 :     }
    1245              : 
    1246              :     /// remote_consistent_lsn which the tenant is guaranteed not to go backward from,
    1247              :     /// i.e. a value of remote_consistent_lsn_projected which has undergone
    1248              :     /// generation validation in the deletion queue.
    1249            0 :     pub(crate) fn get_remote_consistent_lsn_visible(&self) -> Option<Lsn> {
    1250            0 :         self.remote_client.remote_consistent_lsn_visible()
    1251            0 :     }
    1252              : 
    1253              :     /// The sum of the file size of all historic layers in the layer map.
    1254              :     /// This method makes no distinction between local and remote layers.
    1255              :     /// Hence, the result **does not represent local filesystem usage**.
    1256            0 :     pub(crate) async fn layer_size_sum(&self) -> u64 {
    1257            0 :         let guard = self.layers.read().await;
    1258            0 :         guard.layer_size_sum()
    1259            0 :     }
    1260              : 
    1261            0 :     pub(crate) fn resident_physical_size(&self) -> u64 {
    1262            0 :         self.metrics.resident_physical_size_get()
    1263            0 :     }
    1264              : 
    1265            0 :     pub(crate) fn get_directory_metrics(&self) -> [u64; DirectoryKind::KINDS_NUM] {
    1266            0 :         array::from_fn(|idx| self.directory_metrics[idx].load(AtomicOrdering::Relaxed))
    1267            0 :     }
    1268              : 
    1269              :     ///
    1270              :     /// Wait until WAL has been received and processed up to this LSN.
    1271              :     ///
    1272              :     /// You should call this before any of the other get_* or list_* functions. Calling
     1273              :     /// those functions with an LSN that has not been processed yet is an error.
    1274              :     ///
    1275       224854 :     pub(crate) async fn wait_lsn(
    1276       224854 :         &self,
    1277       224854 :         lsn: Lsn,
    1278       224854 :         who_is_waiting: WaitLsnWaiter<'_>,
    1279       224854 :         ctx: &RequestContext, /* Prepare for use by cancellation */
    1280       224854 :     ) -> Result<(), WaitLsnError> {
    1281       224854 :         let state = self.current_state();
    1282       224854 :         if self.cancel.is_cancelled() || matches!(state, TimelineState::Stopping) {
    1283            0 :             return Err(WaitLsnError::Shutdown);
    1284       224854 :         } else if !matches!(state, TimelineState::Active) {
    1285            0 :             return Err(WaitLsnError::BadState(state));
    1286       224854 :         }
    1287       224854 : 
    1288       224854 :         if cfg!(debug_assertions) {
    1289       224854 :             match ctx.task_kind() {
    1290              :                 TaskKind::WalReceiverManager
    1291              :                 | TaskKind::WalReceiverConnectionHandler
    1292              :                 | TaskKind::WalReceiverConnectionPoller => {
    1293            0 :                     let is_myself = match who_is_waiting {
    1294            0 :                         WaitLsnWaiter::Timeline(waiter) => Weak::ptr_eq(&waiter.myself, &self.myself),
    1295            0 :                         WaitLsnWaiter::Tenant | WaitLsnWaiter::PageService => unreachable!("tenant or page_service context are not expected to have task kind {:?}", ctx.task_kind()),
    1296              :                     };
    1297            0 :                     if is_myself {
    1298            0 :                         if let Err(current) = self.last_record_lsn.would_wait_for(lsn) {
    1299              :                             // walingest is the only one that can advance last_record_lsn; it should make sure to never reach here
    1300            0 :                             panic!("this timeline's walingest task is calling wait_lsn({lsn}) but we only have last_record_lsn={current}; would deadlock");
    1301            0 :                         }
    1302            0 :                     } else {
     1303            0 :                         // if another timeline's walreceiver is waiting for us, there's no deadlock risk because
     1304            0 :                         // our walreceiver task can make progress independently of theirs
    1305            0 :                     }
    1306              :                 }
    1307       224854 :                 _ => {}
    1308              :             }
    1309            0 :         }
    1310              : 
    1311       224854 :         let _timer = crate::metrics::WAIT_LSN_TIME.start_timer();
    1312       224854 : 
    1313       224854 :         match self
    1314       224854 :             .last_record_lsn
    1315       224854 :             .wait_for_timeout(lsn, self.conf.wait_lsn_timeout)
    1316            0 :             .await
    1317              :         {
    1318       224854 :             Ok(()) => Ok(()),
    1319            0 :             Err(e) => {
    1320              :                 use utils::seqwait::SeqWaitError::*;
    1321            0 :                 match e {
    1322            0 :                     Shutdown => Err(WaitLsnError::Shutdown),
    1323              :                     Timeout => {
    1324              :                         // don't count the time spent waiting for lock below, and also in walreceiver.status(), towards the wait_lsn_time_histo
    1325            0 :                         drop(_timer);
    1326            0 :                         let walreceiver_status = self.walreceiver_status();
    1327            0 :                         Err(WaitLsnError::Timeout(format!(
    1328            0 :                         "Timed out while waiting for WAL record at LSN {} to arrive, last_record_lsn {} disk consistent LSN={}, WalReceiver status: {}",
    1329            0 :                         lsn,
    1330            0 :                         self.get_last_record_lsn(),
    1331            0 :                         self.get_disk_consistent_lsn(),
    1332            0 :                         walreceiver_status,
    1333            0 :                     )))
    1334              :                     }
    1335              :                 }
    1336              :             }
    1337              :         }
    1338       224854 :     }
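                       :     // Editor's note: an illustrative wait-then-read sequence (not part of this
                       :     // file), following the doc comment above: wait for the LSN to be ingested
                       :     // before issuing reads at it.
                       :     //
                       :     // async fn read_at(
                       :     //     timeline: &Timeline,
                       :     //     key: Key,
                       :     //     lsn: Lsn,
                       :     //     ctx: &RequestContext,
                       :     // ) -> anyhow::Result<Bytes> {
                       :     //     timeline.wait_lsn(lsn, WaitLsnWaiter::PageService, ctx).await?;
                       :     //     Ok(timeline.get(key, lsn, ctx).await?)
                       :     // }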
    1339              : 
    1340            0 :     pub(crate) fn walreceiver_status(&self) -> String {
    1341            0 :         match &*self.walreceiver.lock().unwrap() {
    1342            0 :             None => "stopping or stopped".to_string(),
    1343            0 :             Some(walreceiver) => match walreceiver.status() {
    1344            0 :                 Some(status) => status.to_human_readable_string(),
    1345            0 :                 None => "Not active".to_string(),
    1346              :             },
    1347              :         }
    1348            0 :     }
    1349              : 
    1350              :     /// Check that it is valid to request operations with that lsn.
    1351          232 :     pub(crate) fn check_lsn_is_in_scope(
    1352          232 :         &self,
    1353          232 :         lsn: Lsn,
    1354          232 :         latest_gc_cutoff_lsn: &RcuReadGuard<Lsn>,
    1355          232 :     ) -> anyhow::Result<()> {
    1356          232 :         ensure!(
    1357          232 :             lsn >= **latest_gc_cutoff_lsn,
    1358            4 :             "LSN {} is earlier than latest GC cutoff {} (we might've already garbage collected needed data)",
    1359            4 :             lsn,
    1360            4 :             **latest_gc_cutoff_lsn,
    1361              :         );
    1362          228 :         Ok(())
    1363          232 :     }
    1364              : 
    1365              :     /// Initializes an LSN lease. The function will return an error if the requested LSN is less than the `latest_gc_cutoff_lsn`.
    1366           10 :     pub(crate) fn init_lsn_lease(
    1367           10 :         &self,
    1368           10 :         lsn: Lsn,
    1369           10 :         length: Duration,
    1370           10 :         ctx: &RequestContext,
    1371           10 :     ) -> anyhow::Result<LsnLease> {
    1372           10 :         self.make_lsn_lease(lsn, length, true, ctx)
    1373           10 :     }
    1374              : 
    1375              :     /// Renews a lease at a particular LSN. The requested LSN is not validated against the `latest_gc_cutoff_lsn` when we are in the grace period.
    1376            4 :     pub(crate) fn renew_lsn_lease(
    1377            4 :         &self,
    1378            4 :         lsn: Lsn,
    1379            4 :         length: Duration,
    1380            4 :         ctx: &RequestContext,
    1381            4 :     ) -> anyhow::Result<LsnLease> {
    1382            4 :         self.make_lsn_lease(lsn, length, false, ctx)
    1383            4 :     }
    1384              : 
    1385              :     /// Obtains a temporary lease blocking garbage collection for the given LSN.
    1386              :     ///
     1387              :     /// If we are in `AttachedSingle` mode and are not blocked by the LSN lease deadline, this function
     1388              :     /// will return an error if the requested LSN is less than the `latest_gc_cutoff_lsn` and there is no existing lease present.
    1389              :     ///
    1390              :     /// If there is an existing lease in the map, the lease will be renewed only if the request extends the lease.
     1391              :     /// The returned lease is therefore the maximum of the existing lease and the requested lease.
    1392           14 :     fn make_lsn_lease(
    1393           14 :         &self,
    1394           14 :         lsn: Lsn,
    1395           14 :         length: Duration,
    1396           14 :         init: bool,
    1397           14 :         _ctx: &RequestContext,
    1398           14 :     ) -> anyhow::Result<LsnLease> {
    1399           12 :         let lease = {
    1400              :             // Normalize the requested LSN to be aligned, and move to the first record
    1401              :             // if it points to the beginning of the page (header).
    1402           14 :             let lsn = xlog_utils::normalize_lsn(lsn, WAL_SEGMENT_SIZE);
    1403           14 : 
    1404           14 :             let mut gc_info = self.gc_info.write().unwrap();
    1405           14 : 
    1406           14 :             let valid_until = SystemTime::now() + length;
    1407           14 : 
    1408           14 :             let entry = gc_info.leases.entry(lsn);
    1409           14 : 
    1410           14 :             match entry {
    1411            6 :                 Entry::Occupied(mut occupied) => {
    1412            6 :                     let existing_lease = occupied.get_mut();
    1413            6 :                     if valid_until > existing_lease.valid_until {
    1414            2 :                         existing_lease.valid_until = valid_until;
    1415            2 :                         let dt: DateTime<Utc> = valid_until.into();
    1416            2 :                         info!("lease extended to {}", dt);
    1417              :                     } else {
    1418            4 :                         let dt: DateTime<Utc> = existing_lease.valid_until.into();
    1419            4 :                         info!("existing lease covers greater length, valid until {}", dt);
    1420              :                     }
    1421              : 
    1422            6 :                     existing_lease.clone()
    1423              :                 }
    1424            8 :                 Entry::Vacant(vacant) => {
    1425              :                     // Reject already GC-ed LSN (lsn < latest_gc_cutoff) if we are in AttachedSingle and
    1426              :                     // not blocked by the lsn lease deadline.
    1427            8 :                     let validate = {
    1428            8 :                         let conf = self.tenant_conf.load();
    1429            8 :                         conf.location.attach_mode == AttachmentMode::Single
    1430            8 :                             && !conf.is_gc_blocked_by_lsn_lease_deadline()
    1431              :                     };
    1432              : 
    1433            8 :                     if init || validate {
    1434            8 :                         let latest_gc_cutoff_lsn = self.get_latest_gc_cutoff_lsn();
    1435            8 :                         if lsn < *latest_gc_cutoff_lsn {
    1436            2 :                             bail!("tried to request a page version that was garbage collected. requested at {} gc cutoff {}", lsn, *latest_gc_cutoff_lsn);
    1437            6 :                         }
    1438            0 :                     }
    1439              : 
    1440            6 :                     let dt: DateTime<Utc> = valid_until.into();
    1441            6 :                     info!("lease created, valid until {}", dt);
    1442            6 :                     vacant.insert(LsnLease { valid_until }).clone()
    1443              :                 }
    1444              :             }
    1445              :         };
    1446              : 
    1447           12 :         Ok(lease)
    1448           14 :     }
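                       :     // Editor's note: an illustrative sequence (not part of this file) showing
                       :     // the renewal semantics documented above: a shorter renewal never shrinks
                       :     // an existing lease, because the later valid_until wins.
                       :     //
                       :     // let first = timeline.init_lsn_lease(lsn, Duration::from_secs(600), &ctx)?;
                       :     // let renewed = timeline.renew_lsn_lease(lsn, Duration::from_secs(60), &ctx)?;
                       :     // assert!(renewed.valid_until >= first.valid_until);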
    1449              : 
    1450              :     /// Flush to disk all data that was written with the put_* functions
    1451         1092 :     #[instrument(skip(self), fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id))]
    1452              :     pub(crate) async fn freeze_and_flush(&self) -> Result<(), FlushLayerError> {
    1453              :         self.freeze_and_flush0().await
    1454              :     }
    1455              : 
    1456              :     // This exists to provide a non-span creating version of `freeze_and_flush` we can call without
    1457              :     // polluting the span hierarchy.
    1458         1092 :     pub(crate) async fn freeze_and_flush0(&self) -> Result<(), FlushLayerError> {
    1459         1092 :         let token = {
    1460              :             // Freeze the current open in-memory layer. It will be written to disk on next
    1461              :             // iteration.
    1462         1092 :             let mut g = self.write_lock.lock().await;
    1463              : 
    1464         1092 :             let to_lsn = self.get_last_record_lsn();
    1465         1092 :             self.freeze_inmem_layer_at(to_lsn, &mut g).await?
    1466              :         };
    1467         1092 :         self.wait_flush_completion(token).await
    1468         1092 :     }
    1469              : 
    1470              :     // Check if an open ephemeral layer should be closed: this provides
     1471              :     // background enforcement of the checkpoint interval when there is no active WAL receiver, to avoid keeping
    1472              :     // an ephemeral layer open forever when idle.  It also freezes layers if the global limit on
    1473              :     // ephemeral layer bytes has been breached.
    1474            0 :     pub(super) async fn maybe_freeze_ephemeral_layer(&self) {
    1475            0 :         let Ok(mut write_guard) = self.write_lock.try_lock() else {
    1476              :             // If the write lock is held, there is an active wal receiver: rolling open layers
    1477              :             // is their responsibility while they hold this lock.
    1478            0 :             return;
    1479              :         };
    1480              : 
     1481              :         // FIXME: why not early exit? because before #7927 the state would have been cleared every
    1482              :         // time, and this was missed.
    1483              :         // if write_guard.is_none() { return; }
    1484              : 
    1485            0 :         let Ok(layers_guard) = self.layers.try_read() else {
    1486              :             // Don't block if the layer lock is busy
    1487            0 :             return;
    1488              :         };
    1489              : 
    1490            0 :         let Ok(lm) = layers_guard.layer_map() else {
    1491            0 :             return;
    1492              :         };
    1493              : 
    1494            0 :         let Some(open_layer) = &lm.open_layer else {
    1495              :             // If there is no open layer, we have no layer freezing to do.  However, we might need to generate
    1496              :             // some updates to disk_consistent_lsn and remote_consistent_lsn, in case we ingested some WAL regions
    1497              :             // that didn't result in writes to this shard.
    1498              : 
    1499              :             // Must not hold the layers lock while waiting for a flush.
    1500            0 :             drop(layers_guard);
    1501            0 : 
    1502            0 :             let last_record_lsn = self.get_last_record_lsn();
    1503            0 :             let disk_consistent_lsn = self.get_disk_consistent_lsn();
    1504            0 :             if last_record_lsn > disk_consistent_lsn {
    1505              :                 // We have no open layer, but disk_consistent_lsn is behind the last record: this indicates
    1506              :                 // we are a sharded tenant and have skipped some WAL
    1507            0 :                 let last_freeze_ts = *self.last_freeze_ts.read().unwrap();
    1508            0 :                 if last_freeze_ts.elapsed() >= self.get_checkpoint_timeout() {
     1509              :                     // Only do this if we have been layer-less for longer than get_checkpoint_timeout, so that a shard
    1510              :                     // without any data ingested (yet) doesn't write a remote index as soon as it
    1511              :                     // sees its LSN advance: we only do this if we've been layer-less
    1512              :                     // for some time.
    1513            0 :                     tracing::debug!(
    1514            0 :                         "Advancing disk_consistent_lsn past WAL ingest gap {} -> {}",
    1515              :                         disk_consistent_lsn,
    1516              :                         last_record_lsn
    1517              :                     );
    1518              : 
    1519              :                     // The flush loop will update remote consistent LSN as well as disk consistent LSN.
    1520              :                     // We know there is no open layer, so we can request freezing without actually
    1521              :                     // freezing anything. This is true even if we have dropped the layers_guard, we
    1522              :                     // still hold the write_guard.
    1523            0 :                     let _ = async {
    1524            0 :                         let token = self
    1525            0 :                             .freeze_inmem_layer_at(last_record_lsn, &mut write_guard)
    1526            0 :                             .await?;
    1527            0 :                         self.wait_flush_completion(token).await
    1528            0 :                     }
    1529            0 :                     .await;
    1530            0 :                 }
    1531            0 :             }
    1532              : 
    1533            0 :             return;
    1534              :         };
    1535              : 
    1536            0 :         let Some(current_size) = open_layer.try_len() else {
    1537              :             // Unexpected: since we hold the write guard, nobody else should be writing to this layer, so
     1538              :             // the read lock to get its size should always succeed.
    1539            0 :             tracing::warn!("Lock conflict while reading size of open layer");
    1540            0 :             return;
    1541              :         };
    1542              : 
    1543            0 :         let current_lsn = self.get_last_record_lsn();
    1544              : 
    1545            0 :         let checkpoint_distance_override = open_layer.tick().await;
    1546              : 
    1547            0 :         if let Some(size_override) = checkpoint_distance_override {
    1548            0 :             if current_size > size_override {
    1549              :                 // This is not harmful, but it only happens in relatively rare cases where
    1550              :                 // time-based checkpoints are not happening fast enough to keep the amount of
    1551              :                 // ephemeral data within configured limits.  It's a sign of stress on the system.
    1552            0 :                 tracing::info!("Early-rolling open layer at size {current_size} (limit {size_override}) due to dirty data pressure");
    1553            0 :             }
    1554            0 :         }
    1555              : 
    1556            0 :         let checkpoint_distance =
    1557            0 :             checkpoint_distance_override.unwrap_or(self.get_checkpoint_distance());
    1558            0 : 
    1559            0 :         if self.should_roll(
    1560            0 :             current_size,
    1561            0 :             current_size,
    1562            0 :             checkpoint_distance,
    1563            0 :             self.get_last_record_lsn(),
    1564            0 :             self.last_freeze_at.load(),
    1565            0 :             open_layer.get_opened_at(),
    1566            0 :         ) {
    1567            0 :             match open_layer.info() {
    1568            0 :                 InMemoryLayerInfo::Frozen { lsn_start, lsn_end } => {
     1569            0 :                     // We may reach this point if the layer was already frozen but not yet flushed: flushing
    1570            0 :                     // happens asynchronously in the background.
    1571            0 :                     tracing::debug!(
    1572            0 :                         "Not freezing open layer, it's already frozen ({lsn_start}..{lsn_end})"
    1573              :                     );
    1574              :                 }
    1575              :                 InMemoryLayerInfo::Open { .. } => {
    1576              :                     // Upgrade to a write lock and freeze the layer
    1577            0 :                     drop(layers_guard);
    1578            0 :                     let res = self
    1579            0 :                         .freeze_inmem_layer_at(current_lsn, &mut write_guard)
    1580            0 :                         .await;
    1581              : 
    1582            0 :                     if let Err(e) = res {
    1583            0 :                         tracing::info!(
    1584            0 :                             "failed to flush frozen layer after background freeze: {e:#}"
    1585              :                         );
    1586            0 :                     }
    1587              :                 }
    1588              :             }
    1589            0 :         }
    1590            0 :     }
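                       :     // Editor's note: an illustrative call site (not part of this file); the
                       :     // loop and `housekeeping_period` are hypothetical. Per the comment above,
                       :     // this is meant to be polled from a background task and returns
                       :     // immediately if the write or layer locks are busy.
                       :     //
                       :     // loop {
                       :     //     timeline.maybe_freeze_ephemeral_layer().await;
                       :     //     tokio::time::sleep(housekeeping_period).await;
                       :     // }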
    1591              : 
    1592              :     /// Checks if the internal state of the timeline is consistent with it being able to be offloaded.
    1593              :     ///
     1594              :     /// This is necessary but not sufficient for offloading the timeline, as it might have
    1595              :     /// child timelines that are not offloaded yet.
    1596            0 :     pub(crate) fn can_offload(&self) -> (bool, &'static str) {
    1597            0 :         if self.remote_client.is_archived() != Some(true) {
    1598            0 :             return (false, "the timeline is not archived");
    1599            0 :         }
    1600            0 :         if !self.remote_client.no_pending_work() {
    1601              :             // if the remote client is still processing some work, we can't offload
    1602            0 :             return (false, "the upload queue is not drained yet");
    1603            0 :         }
    1604            0 : 
    1605            0 :         (true, "ok")
    1606            0 :     }
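                       :     // Editor's note: an illustrative caller (not part of this file). As the
                       :     // doc comment notes, this check is necessary but not sufficient: child
                       :     // timelines must be offloaded too.
                       :     //
                       :     // let (ok, reason) = timeline.can_offload();
                       :     // if !ok {
                       :     //     tracing::info!("not offloading timeline: {reason}");
                       :     // }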
    1607              : 
    1608              :     /// Outermost timeline compaction operation; downloads needed layers. Returns whether we have pending
    1609              :     /// compaction tasks.
    1610          364 :     pub(crate) async fn compact(
    1611          364 :         self: &Arc<Self>,
    1612          364 :         cancel: &CancellationToken,
    1613          364 :         flags: EnumSet<CompactFlags>,
    1614          364 :         ctx: &RequestContext,
    1615          364 :     ) -> Result<bool, CompactionError> {
     1616          364 :         // most likely the cancellation token is from a background task, but in tests it could be the
    1617          364 :         // request task as well.
    1618          364 : 
    1619          364 :         let prepare = async move {
    1620          364 :             let guard = self.compaction_lock.lock().await;
    1621              : 
    1622          364 :             let permit = super::tasks::concurrent_background_tasks_rate_limit_permit(
    1623          364 :                 BackgroundLoopKind::Compaction,
    1624          364 :                 ctx,
    1625          364 :             )
    1626            0 :             .await;
    1627              : 
    1628          364 :             (guard, permit)
    1629          364 :         };
    1630              : 
    1631              :         // this wait probably never needs any "long time spent" logging, because we already nag if
     1632              :         // the compaction task goes over its period (20s), which is quite often in production.
    1633          364 :         let (_guard, _permit) = tokio::select! {
    1634          364 :             tuple = prepare => { tuple },
    1635          364 :             _ = self.cancel.cancelled() => return Ok(false),
    1636          364 :             _ = cancel.cancelled() => return Ok(false),
    1637              :         };
    1638              : 
    1639          364 :         let last_record_lsn = self.get_last_record_lsn();
    1640          364 : 
    1641          364 :         // Last record Lsn could be zero in case the timeline was just created
    1642          364 :         if !last_record_lsn.is_valid() {
    1643            0 :             warn!("Skipping compaction for potentially just initialized timeline, it has invalid last record lsn: {last_record_lsn}");
    1644            0 :             return Ok(false);
    1645          364 :         }
    1646          364 : 
    1647          364 :         match self.get_compaction_algorithm_settings().kind {
    1648              :             CompactionAlgorithm::Tiered => {
    1649            0 :                 self.compact_tiered(cancel, ctx).await?;
    1650            0 :                 Ok(false)
    1651              :             }
    1652        37131 :             CompactionAlgorithm::Legacy => self.compact_legacy(cancel, flags, ctx).await,
    1653              :         }
    1654          364 :     }
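                       :     // Editor's note: an illustrative caller (not part of this file); the empty
                       :     // flag set and fresh token are assumptions. The returned bool reports
                       :     // whether compaction work is still pending, so a caller may loop:
                       :     //
                       :     // let cancel = CancellationToken::new();
                       :     // while timeline.compact(&cancel, EnumSet::empty(), &ctx).await? {
                       :     //     // more compaction work remains; go again
                       :     // }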
    1655              : 
    1656              :     /// Mutate the timeline with a [`TimelineWriter`].
    1657      5133162 :     pub(crate) async fn writer(&self) -> TimelineWriter<'_> {
    1658      5133162 :         TimelineWriter {
    1659      5133162 :             tl: self,
    1660      5133162 :             write_guard: self.write_lock.lock().await,
    1661              :         }
    1662      5133162 :     }
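                       :     // Editor's note: an illustrative write path (not part of this file). The
                       :     // TimelineWriter's put_* methods are referenced by `freeze_and_flush`'s
                       :     // doc comment but their signatures are not shown here.
                       :     //
                       :     // let writer = timeline.writer().await; // holds write_lock, serializing writers
                       :     // // ... apply put_* calls through `writer` ...
                       :     // drop(writer); // release write_lock before flushing
                       :     // timeline.freeze_and_flush().await?;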
    1663              : 
    1664            0 :     pub(crate) fn activate(
    1665            0 :         self: &Arc<Self>,
    1666            0 :         parent: Arc<crate::tenant::Tenant>,
    1667            0 :         broker_client: BrokerClientChannel,
    1668            0 :         background_jobs_can_start: Option<&completion::Barrier>,
    1669            0 :         ctx: &RequestContext,
    1670            0 :     ) {
    1671            0 :         if self.tenant_shard_id.is_shard_zero() {
    1672            0 :             // Logical size is only maintained accurately on shard zero.
    1673            0 :             self.spawn_initial_logical_size_computation_task(ctx);
    1674            0 :         }
    1675            0 :         self.launch_wal_receiver(ctx, broker_client);
    1676            0 :         self.set_state(TimelineState::Active);
    1677            0 :         self.launch_eviction_task(parent, background_jobs_can_start);
    1678            0 :     }
    1679              : 
     1680              :     /// After this function returns, no timeline-scoped tasks are left running.
    1681              :     ///
     1682              :     /// The preferred pattern is:
    1683              :     /// - in any spawned tasks, keep Timeline::guard open + Timeline::cancel / child token
    1684              :     /// - if early shutdown (not just cancellation) of a sub-tree of tasks is required,
    1685              :     ///   go the extra mile and keep track of JoinHandles
    1686              :     /// - Keep track of JoinHandles using a passed-down `Arc<Mutex<Option<JoinSet>>>` or similar,
    1687              :     ///   instead of spawning directly on a runtime. It is a more composable / testable pattern.
    1688              :     ///
    1689              :     /// For legacy reasons, we still have multiple tasks spawned using
    1690              :     /// `task_mgr::spawn(X, Some(tenant_id), Some(timeline_id))`.
    1691              :     /// We refer to these as "timeline-scoped task_mgr tasks".
    1692              :     /// Some of these tasks are already sensitive to Timeline::cancel while others are
    1693              :     /// not sensitive to Timeline::cancel and instead respect [`task_mgr::shutdown_token`]
    1694              :     /// or [`task_mgr::shutdown_watcher`].
    1695              :     /// We want to gradually convert the code base away from these.
    1696              :     ///
    1697              :     /// Here is an inventory of timeline-scoped task_mgr tasks that are still sensitive to
    1698              :     /// `task_mgr::shutdown_{token,watcher}` (there are also tenant-scoped and global-scoped
    1699              :     /// ones that aren't mentioned here):
    1700              :     /// - [`TaskKind::TimelineDeletionWorker`]
    1701              :     ///    - NB: also used for tenant deletion
    1702              :     /// - [`TaskKind::RemoteUploadTask`]`
    1703              :     /// - [`TaskKind::InitialLogicalSizeCalculation`]
    1704              :     /// - [`TaskKind::DownloadAllRemoteLayers`] (can we get rid of it?)
     1705              :     /// Inventory of timeline-scoped task_mgr tasks that use spawn but aren't sensitive:
    1706              :     /// - [`TaskKind::Eviction`]
    1707              :     /// - [`TaskKind::LayerFlushTask`]
    1708              :     /// - [`TaskKind::OndemandLogicalSizeCalculation`]
    1709              :     /// - [`TaskKind::GarbageCollector`] (immediate_gc is timeline-scoped)
    1710           10 :     pub(crate) async fn shutdown(&self, mode: ShutdownMode) {
    1711           10 :         debug_assert_current_span_has_tenant_and_timeline_id();
    1712           10 : 
    1713           10 :         // Regardless of whether we're going to try_freeze_and_flush
    1714           10 :         // or not, stop ingesting any more data. Walreceiver only provides
    1715           10 :         // cancellation but no "wait until gone", because it uses the Timeline::gate.
    1716           10 :         // So, only after the self.gate.close() below will we know for sure that
    1717           10 :         // no walreceiver tasks are left.
    1718           10 :         // For `try_freeze_and_flush=true`, this means that we might still be ingesting
    1719           10 :         // data during the call to `self.freeze_and_flush()` below.
    1720           10 :         // That's not ideal, but, we don't have the concept of a ChildGuard,
    1721           10 :         // which is what we'd need to properly model early shutdown of the walreceiver
    1722           10 :         // task sub-tree before the other Timeline task sub-trees.
    1723           10 :         let walreceiver = self.walreceiver.lock().unwrap().take();
    1724           10 :         tracing::debug!(
    1725            0 :             is_some = walreceiver.is_some(),
    1726            0 :             "Waiting for WalReceiverManager..."
    1727              :         );
    1728           10 :         if let Some(walreceiver) = walreceiver {
    1729            0 :             walreceiver.cancel();
    1730           10 :         }
    1731              :         // ... and inform any waiters for newer LSNs that there won't be any.
    1732           10 :         self.last_record_lsn.shutdown();
    1733           10 : 
    1734           10 :         if let ShutdownMode::FreezeAndFlush = mode {
    1735            6 :             if let Some((open, frozen)) = self
    1736            6 :                 .layers
    1737            6 :                 .read()
    1738            0 :                 .await
    1739            6 :                 .layer_map()
    1740            6 :                 .map(|lm| (lm.open_layer.is_some(), lm.frozen_layers.len()))
    1741            6 :                 .ok()
    1742            6 :                 .filter(|(open, frozen)| *open || *frozen > 0)
    1743              :             {
    1744            0 :                 tracing::info!(?open, frozen, "flushing and freezing on shutdown");
    1745            6 :             } else {
    1746            6 :                 // this is double-shutdown, ignore it
    1747            6 :             }
    1748              : 
     1749              :             // we shut down walreceiver above, so we won't add anything more
    1750              :             // to the InMemoryLayer; freeze it and wait for all frozen layers
    1751              :             // to reach the disk & upload queue, then shut the upload queue and
    1752              :             // wait for it to drain.
    1753            6 :             match self.freeze_and_flush().await {
    1754              :                 Ok(_) => {
    1755              :                     // drain the upload queue
    1756              :                     // if we did not wait for completion here, it might be our shutdown process
    1757              :                     // didn't wait for remote uploads to complete at all, as new tasks can forever
    1758              :                     // be spawned.
    1759              :                     //
     1760              :                     // what is problematic is shutting down the RemoteTimelineClient, because
     1761              :                     // it obviously does not make sense to stop while we wait for it, but what
     1762              :                     // about corner cases like S3 suddenly hanging up?
    1763            6 :                     self.remote_client.shutdown().await;
    1764              :                 }
    1765              :                 Err(FlushLayerError::Cancelled) => {
    1766              :                     // this is likely the second shutdown, ignore silently.
    1767              :                     // TODO: this can be removed once https://github.com/neondatabase/neon/issues/5080
    1768            0 :                     debug_assert!(self.cancel.is_cancelled());
    1769              :                 }
    1770            0 :                 Err(e) => {
    1771            0 :                     // Non-fatal.  Shutdown is infallible.  Failures to flush just mean that
    1772            0 :                     // we have some extra WAL replay to do next time the timeline starts.
    1773            0 :                     warn!("failed to freeze and flush: {e:#}");
    1774              :                 }
    1775              :             }
    1776              : 
    1777              :             // `self.remote_client.shutdown().await` above should have already flushed everything from the queue, but
    1778              :             // we also do a final check here to ensure that the queue is empty.
    1779            6 :             if !self.remote_client.no_pending_work() {
    1780            0 :                 warn!("still have pending work in remote upload queue, but continuing shutting down anyways");
    1781            6 :             }
    1782            4 :         }
    1783              : 
    1784           10 :         if let ShutdownMode::Flush = mode {
    1785              :             // drain the upload queue
    1786            2 :             self.remote_client.shutdown().await;
    1787            2 :             if !self.remote_client.no_pending_work() {
    1788            0 :                 warn!("still have pending work in remote upload queue, but continuing shutting down anyways");
    1789            2 :             }
    1790            8 :         }
    1791              : 
    1792              :         // Signal any subscribers to our cancellation token to drop out
    1793           10 :         tracing::debug!("Cancelling CancellationToken");
    1794           10 :         self.cancel.cancel();
    1795           10 : 
     1796           10 :         // Prevent new page service requests from starting.
    1797           10 :         self.handles.shutdown();
    1798           10 : 
    1799           10 :         // Transition the remote_client into a state where it's only useful for timeline deletion.
     1800           10 :         // (The deletion use case is why we can't just hook up remote_client to Self::cancel.)
    1801           10 :         self.remote_client.stop();
    1802           10 : 
    1803           10 :         // As documented in remote_client.stop()'s doc comment, it's our responsibility
    1804           10 :         // to shut down the upload queue tasks.
    1805           10 :         // TODO: fix that, task management should be encapsulated inside remote_client.
    1806           10 :         task_mgr::shutdown_tasks(
    1807           10 :             Some(TaskKind::RemoteUploadTask),
    1808           10 :             Some(self.tenant_shard_id),
    1809           10 :             Some(self.timeline_id),
    1810           10 :         )
    1811            0 :         .await;
    1812              : 
    1813              :         // TODO: work toward making this a no-op. See this function's doc comment for more context.
    1814           10 :         tracing::debug!("Waiting for tasks...");
    1815           10 :         task_mgr::shutdown_tasks(None, Some(self.tenant_shard_id), Some(self.timeline_id)).await;
    1816              : 
    1817              :         {
    1818              :             // Allow any remaining in-memory layers to do cleanup -- until then, they hold
    1819              :             // the gate open.
    1820           10 :             let mut write_guard = self.write_lock.lock().await;
    1821           10 :             self.layers.write().await.shutdown(&mut write_guard);
    1822           10 :         }
    1823           10 : 
    1824           10 :         // Finally wait until any gate-holders are complete.
    1825           10 :         //
    1826           10 :         // TODO: once above shutdown_tasks is a no-op, we can close the gate before calling shutdown_tasks
    1827           10 :         // and use a TBD variant of shutdown_tasks that asserts that there were no tasks left.
    1828           10 :         self.gate.close().await;
    1829              : 
    1830           10 :         self.metrics.shutdown();
    1831           10 :     }
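                       :     // The Gate used in shutdown() follows an enter-guard/close lifecycle; a hedged
                       :     // sketch of the pattern (Gate::enter and Gate::close as used in this file):
                       :     //
                       :     //     let guard = timeline.gate.enter()?;   // fails once shutdown has begun
                       :     //     /* ... do work while holding the guard ... */
                       :     //     drop(guard);                          // releases our hold on the gate
                       :     //     timeline.gate.close().await;          // shutdown: waits for all guards to drop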
    1832              : 
    1833          420 :     pub(crate) fn set_state(&self, new_state: TimelineState) {
    1834          420 :         match (self.current_state(), new_state) {
    1835          420 :             (equal_state_1, equal_state_2) if equal_state_1 == equal_state_2 => {
    1836            2 :                 info!("Ignoring new state, equal to the existing one: {equal_state_2:?}");
    1837              :             }
    1838            0 :             (st, TimelineState::Loading) => {
    1839            0 :                 error!("ignoring transition from {st:?} into Loading state");
    1840              :             }
    1841            0 :             (TimelineState::Broken { .. }, new_state) => {
    1842            0 :                 error!("Ignoring state update {new_state:?} for broken timeline");
    1843              :             }
    1844              :             (TimelineState::Stopping, TimelineState::Active) => {
    1845            0 :                 error!("Not activating a Stopping timeline");
    1846              :             }
    1847          418 :             (_, new_state) => {
    1848          418 :                 self.state.send_replace(new_state);
    1849          418 :             }
    1850              :         }
    1851          420 :     }
    1852              : 
    1853            2 :     pub(crate) fn set_broken(&self, reason: String) {
    1854            2 :         let backtrace_str: String = format!("{}", std::backtrace::Backtrace::force_capture());
    1855            2 :         let broken_state = TimelineState::Broken {
    1856            2 :             reason,
    1857            2 :             backtrace: backtrace_str,
    1858            2 :         };
    1859            2 :         self.set_state(broken_state);
    1860            2 : 
    1861            2 :         // Although the Broken state is not equivalent to shutdown() (shutdown will be called
    1862            2 :         // later when this tenant is detached or the process shuts down), firing the cancellation token
    1863            2 :         // here avoids the need for other tasks to watch for the Broken state explicitly.
    1864            2 :         self.cancel.cancel();
    1865            2 :     }
    1866              : 
    1867       225864 :     pub(crate) fn current_state(&self) -> TimelineState {
    1868       225864 :         self.state.borrow().clone()
    1869       225864 :     }
    1870              : 
    1871            6 :     pub(crate) fn is_broken(&self) -> bool {
    1872            6 :         matches!(&*self.state.borrow(), TimelineState::Broken { .. })
    1873            6 :     }
    1874              : 
    1875          222 :     pub(crate) fn is_active(&self) -> bool {
    1876          222 :         self.current_state() == TimelineState::Active
    1877          222 :     }
    1878              : 
    1879            2 :     pub(crate) fn is_archived(&self) -> Option<bool> {
    1880            2 :         self.remote_client.is_archived()
    1881            2 :     }
    1882              : 
    1883          368 :     pub(crate) fn is_stopping(&self) -> bool {
    1884          368 :         self.current_state() == TimelineState::Stopping
    1885          368 :     }
    1886              : 
    1887            0 :     pub(crate) fn subscribe_for_state_updates(&self) -> watch::Receiver<TimelineState> {
    1888            0 :         self.state.subscribe()
    1889            0 :     }
    1890              : 
    1891       224856 :     pub(crate) async fn wait_to_become_active(
    1892       224856 :         &self,
    1893       224856 :         _ctx: &RequestContext, // Prepare for use by cancellation
    1894       224856 :     ) -> Result<(), TimelineState> {
    1895       224856 :         let mut receiver = self.state.subscribe();
    1896              :         loop {
    1897       224856 :             let current_state = receiver.borrow().clone();
    1898       224856 :             match current_state {
    1899              :                 TimelineState::Loading => {
    1900            0 :                     receiver
    1901            0 :                         .changed()
    1902            0 :                         .await
    1903            0 :                         .expect("holding a reference to self");
    1904              :                 }
    1905              :                 TimelineState::Active { .. } => {
    1906       224854 :                     return Ok(());
    1907              :                 }
    1908              :                 TimelineState::Broken { .. } | TimelineState::Stopping => {
    1909              :                     // There's no chance the timeline can transition back into ::Active
    1910            2 :                     return Err(current_state);
    1911              :                 }
    1912              :             }
    1913              :         }
    1914       224856 :     }
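                       :     // A typical call-site pattern for the above (hedged sketch; the error type and
                       :     // its NotActive variant are illustrative, not this crate's API):
                       :     //
                       :     //     match timeline.wait_to_become_active(&ctx).await {
                       :     //         Ok(()) => { /* proceed to serve the request */ }
                       :     //         Err(state) => return Err(HandlerError::NotActive(state)),
                       :     //     }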
    1915              : 
    1916            0 :     pub(crate) async fn layer_map_info(
    1917            0 :         &self,
    1918            0 :         reset: LayerAccessStatsReset,
    1919            0 :     ) -> Result<LayerMapInfo, layer_manager::Shutdown> {
    1920            0 :         let guard = self.layers.read().await;
    1921            0 :         let layer_map = guard.layer_map()?;
    1922            0 :         let mut in_memory_layers = Vec::with_capacity(layer_map.frozen_layers.len() + 1);
    1923            0 :         if let Some(open_layer) = &layer_map.open_layer {
    1924            0 :             in_memory_layers.push(open_layer.info());
    1925            0 :         }
    1926            0 :         for frozen_layer in &layer_map.frozen_layers {
    1927            0 :             in_memory_layers.push(frozen_layer.info());
    1928            0 :         }
    1929              : 
    1930            0 :         let historic_layers = layer_map
    1931            0 :             .iter_historic_layers()
    1932            0 :             .map(|desc| guard.get_from_desc(&desc).info(reset))
    1933            0 :             .collect();
    1934            0 : 
    1935            0 :         Ok(LayerMapInfo {
    1936            0 :             in_memory_layers,
    1937            0 :             historic_layers,
    1938            0 :         })
    1939            0 :     }
    1940              : 
    1941            0 :     #[instrument(skip_all, fields(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))]
    1942              :     pub(crate) async fn download_layer(
    1943              :         &self,
    1944              :         layer_file_name: &LayerName,
    1945              :     ) -> anyhow::Result<Option<bool>> {
    1946              :         let Some(layer) = self.find_layer(layer_file_name).await? else {
    1947              :             return Ok(None);
    1948              :         };
    1949              : 
    1950              :         layer.download().await?;
    1951              : 
    1952              :         Ok(Some(true))
    1953              :     }
    1954              : 
    1955              :     /// Evict just one layer.
    1956              :     ///
    1957              :     /// Returns `Ok(None)` in the case where the layer could not be found by its `layer_file_name`.
    1958            0 :     pub(crate) async fn evict_layer(
    1959            0 :         &self,
    1960            0 :         layer_file_name: &LayerName,
    1961            0 :     ) -> anyhow::Result<Option<bool>> {
    1962            0 :         let _gate = self
    1963            0 :             .gate
    1964            0 :             .enter()
    1965            0 :             .map_err(|_| anyhow::anyhow!("Shutting down"))?;
    1966              : 
    1967            0 :         let Some(local_layer) = self.find_layer(layer_file_name).await? else {
    1968            0 :             return Ok(None);
    1969              :         };
    1970              : 
    1971              :         // curl has this by default
    1972            0 :         let timeout = std::time::Duration::from_secs(120);
    1973            0 : 
    1974            0 :         match local_layer.evict_and_wait(timeout).await {
    1975            0 :             Ok(()) => Ok(Some(true)),
    1976            0 :             Err(EvictionError::NotFound) => Ok(Some(false)),
    1977            0 :             Err(EvictionError::Downloaded) => Ok(Some(false)),
    1978            0 :             Err(EvictionError::Timeout) => Ok(Some(false)),
    1979              :         }
    1980            0 :     }
    1981              : 
    1982      4803010 :     fn should_roll(
    1983      4803010 :         &self,
    1984      4803010 :         layer_size: u64,
    1985      4803010 :         projected_layer_size: u64,
    1986      4803010 :         checkpoint_distance: u64,
    1987      4803010 :         projected_lsn: Lsn,
    1988      4803010 :         last_freeze_at: Lsn,
    1989      4803010 :         opened_at: Instant,
    1990      4803010 :     ) -> bool {
    1991      4803010 :         let distance = projected_lsn.widening_sub(last_freeze_at);
    1992      4803010 : 
    1993      4803010 :         // Rolling the open layer can be triggered by:
    1994      4803010 :         // 1. The distance from the last LSN we rolled at. This bounds the amount of WAL that
    1995      4803010 :         //    the safekeepers need to store.  For sharded tenants, we multiply by shard count to
    1996      4803010 :         //    account for how writes are distributed across shards: we expect each node to consume
    1997      4803010 :         //    1/count of the LSN on average.
    1998      4803010 :         // 2. The size of the currently open layer.
    1999      4803010 :         // 3. The time since the last roll. This lets safekeepers regard the pageserver as
    2000      4803010 :         //    caught up and suspend activity.
    2001      4803010 :         if distance >= checkpoint_distance as i128 * self.shard_identity.count.count() as i128 {
    2002            0 :             info!(
    2003            0 :                 "Will roll layer at {} with layer size {} due to LSN distance ({})",
    2004              :                 projected_lsn, layer_size, distance
    2005              :             );
    2006              : 
    2007            0 :             true
    2008      4803010 :         } else if projected_layer_size >= checkpoint_distance {
    2009              :             // NB: this check is relied upon by:
    2010           80 :             let _ = IndexEntry::validate_checkpoint_distance;
    2011           80 :             info!(
    2012            0 :                 "Will roll layer at {} with layer size {} due to layer size ({})",
    2013              :                 projected_lsn, layer_size, projected_layer_size
    2014              :             );
    2015              : 
    2016           80 :             true
    2017      4802930 :         } else if distance > 0 && opened_at.elapsed() >= self.get_checkpoint_timeout() {
    2018            0 :             info!(
    2019            0 :                 "Will roll layer at {} with layer size {} due to time since first write to the layer ({:?})",
    2020            0 :                 projected_lsn,
    2021            0 :                 layer_size,
    2022            0 :                 opened_at.elapsed()
    2023              :             );
    2024              : 
    2025            0 :             true
    2026              :         } else {
    2027      4802930 :             false
    2028              :         }
    2029      4803010 :     }
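                       :     // Worked example for trigger 1 (hedged, assuming checkpoint_distance = 256 MiB
                       :     // and a shard count of 4): the open layer rolls once projected_lsn - last_freeze_at
                       :     // reaches 256 MiB * 4 = 1 GiB of global WAL, i.e. roughly 256 MiB ingested per shard.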
    2030              : }
    2031              : 
    2032              : /// Number of times we will compute the partitioning within one checkpoint distance.
    2033              : const REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE: u64 = 10;
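                       : // For example, assuming the default checkpoint_distance of 256 MiB, the
                       : // repartition_threshold computed in Timeline::new below comes out to
                       : // 256 MiB / 10, i.e. repartitioning is considered roughly every 25.6 MiB of WAL.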
    2034              : 
    2035              : // Private functions
    2036              : impl Timeline {
    2037           12 :     pub(crate) fn get_lsn_lease_length(&self) -> Duration {
    2038           12 :         let tenant_conf = self.tenant_conf.load();
    2039           12 :         tenant_conf
    2040           12 :             .tenant_conf
    2041           12 :             .lsn_lease_length
    2042           12 :             .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length)
    2043           12 :     }
    2044              : 
    2045            0 :     pub(crate) fn get_lsn_lease_length_for_ts(&self) -> Duration {
    2046            0 :         let tenant_conf = self.tenant_conf.load();
    2047            0 :         tenant_conf
    2048            0 :             .tenant_conf
    2049            0 :             .lsn_lease_length_for_ts
    2050            0 :             .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length_for_ts)
    2051            0 :     }
    2052              : 
    2053            0 :     pub(crate) fn get_lazy_slru_download(&self) -> bool {
    2054            0 :         let tenant_conf = self.tenant_conf.load();
    2055            0 :         tenant_conf
    2056            0 :             .tenant_conf
    2057            0 :             .lazy_slru_download
    2058            0 :             .unwrap_or(self.conf.default_tenant_conf.lazy_slru_download)
    2059            0 :     }
    2060              : 
    2061      4804616 :     fn get_checkpoint_distance(&self) -> u64 {
    2062      4804616 :         let tenant_conf = self.tenant_conf.load();
    2063      4804616 :         tenant_conf
    2064      4804616 :             .tenant_conf
    2065      4804616 :             .checkpoint_distance
    2066      4804616 :             .unwrap_or(self.conf.default_tenant_conf.checkpoint_distance)
    2067      4804616 :     }
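                       :     // The getters here all follow the same per-tenant-override-or-global-default
                       :     // pattern; a hedged sketch of the idea in isolation (conf_or_default is
                       :     // illustrative, not a helper that exists in this module):
                       :     //
                       :     //     fn conf_or_default<T>(overridden: Option<T>, default: T) -> T {
                       :     //         overridden.unwrap_or(default)
                       :     //     }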
    2068              : 
    2069      4802930 :     fn get_checkpoint_timeout(&self) -> Duration {
    2070      4802930 :         let tenant_conf = self.tenant_conf.load();
    2071      4802930 :         tenant_conf
    2072      4802930 :             .tenant_conf
    2073      4802930 :             .checkpoint_timeout
    2074      4802930 :             .unwrap_or(self.conf.default_tenant_conf.checkpoint_timeout)
    2075      4802930 :     }
    2076              : 
    2077          618 :     fn get_compaction_target_size(&self) -> u64 {
    2078          618 :         let tenant_conf = self.tenant_conf.load();
    2079          618 :         tenant_conf
    2080          618 :             .tenant_conf
    2081          618 :             .compaction_target_size
    2082          618 :             .unwrap_or(self.conf.default_tenant_conf.compaction_target_size)
    2083          618 :     }
    2084              : 
    2085         1536 :     fn get_compaction_threshold(&self) -> usize {
    2086         1536 :         let tenant_conf = self.tenant_conf.load();
    2087         1536 :         tenant_conf
    2088         1536 :             .tenant_conf
    2089         1536 :             .compaction_threshold
    2090         1536 :             .unwrap_or(self.conf.default_tenant_conf.compaction_threshold)
    2091         1536 :     }
    2092              : 
    2093           14 :     fn get_image_creation_threshold(&self) -> usize {
    2094           14 :         let tenant_conf = self.tenant_conf.load();
    2095           14 :         tenant_conf
    2096           14 :             .tenant_conf
    2097           14 :             .image_creation_threshold
    2098           14 :             .unwrap_or(self.conf.default_tenant_conf.image_creation_threshold)
    2099           14 :     }
    2100              : 
    2101          364 :     fn get_compaction_algorithm_settings(&self) -> CompactionAlgorithmSettings {
    2102          364 :         let tenant_conf = &self.tenant_conf.load();
    2103          364 :         tenant_conf
    2104          364 :             .tenant_conf
    2105          364 :             .compaction_algorithm
    2106          364 :             .as_ref()
    2107          364 :             .unwrap_or(&self.conf.default_tenant_conf.compaction_algorithm)
    2108          364 :             .clone()
    2109          364 :     }
    2110              : 
    2111            0 :     fn get_eviction_policy(&self) -> EvictionPolicy {
    2112            0 :         let tenant_conf = self.tenant_conf.load();
    2113            0 :         tenant_conf
    2114            0 :             .tenant_conf
    2115            0 :             .eviction_policy
    2116            0 :             .unwrap_or(self.conf.default_tenant_conf.eviction_policy)
    2117            0 :     }
    2118              : 
    2119          418 :     fn get_evictions_low_residence_duration_metric_threshold(
    2120          418 :         tenant_conf: &TenantConfOpt,
    2121          418 :         default_tenant_conf: &TenantConf,
    2122          418 :     ) -> Duration {
    2123          418 :         tenant_conf
    2124          418 :             .evictions_low_residence_duration_metric_threshold
    2125          418 :             .unwrap_or(default_tenant_conf.evictions_low_residence_duration_metric_threshold)
    2126          418 :     }
    2127              : 
    2128          716 :     fn get_image_layer_creation_check_threshold(&self) -> u8 {
    2129          716 :         let tenant_conf = self.tenant_conf.load();
    2130          716 :         tenant_conf
    2131          716 :             .tenant_conf
    2132          716 :             .image_layer_creation_check_threshold
    2133          716 :             .unwrap_or(
    2134          716 :                 self.conf
    2135          716 :                     .default_tenant_conf
    2136          716 :                     .image_layer_creation_check_threshold,
    2137          716 :             )
    2138          716 :     }
    2139              : 
    2140            0 :     pub(super) fn tenant_conf_updated(&self, new_conf: &TenantConfOpt) {
    2141            0 :         // NB: Most tenant conf options are read by background loops, so
    2142            0 :         // changes will automatically be picked up.
    2143            0 : 
    2144            0 :         // The threshold is embedded in the metric, so we need to update it.
    2145            0 :         {
    2146            0 :             let new_threshold = Self::get_evictions_low_residence_duration_metric_threshold(
    2147            0 :                 new_conf,
    2148            0 :                 &self.conf.default_tenant_conf,
    2149            0 :             );
    2150            0 : 
    2151            0 :             let tenant_id_str = self.tenant_shard_id.tenant_id.to_string();
    2152            0 :             let shard_id_str = format!("{}", self.tenant_shard_id.shard_slug());
    2153            0 : 
    2154            0 :             let timeline_id_str = self.timeline_id.to_string();
    2155            0 :             self.metrics
    2156            0 :                 .evictions_with_low_residence_duration
    2157            0 :                 .write()
    2158            0 :                 .unwrap()
    2159            0 :                 .change_threshold(
    2160            0 :                     &tenant_id_str,
    2161            0 :                     &shard_id_str,
    2162            0 :                     &timeline_id_str,
    2163            0 :                     new_threshold,
    2164            0 :                 );
    2165            0 :         }
    2166            0 :     }
    2167              : 
    2168              :     /// Open a Timeline handle.
    2169              :     ///
    2170              :     /// Loads the metadata for the timeline into memory, but not the layer map.
    2171              :     #[allow(clippy::too_many_arguments)]
    2172          418 :     pub(super) fn new(
    2173          418 :         conf: &'static PageServerConf,
    2174          418 :         tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
    2175          418 :         metadata: &TimelineMetadata,
    2176          418 :         ancestor: Option<Arc<Timeline>>,
    2177          418 :         timeline_id: TimelineId,
    2178          418 :         tenant_shard_id: TenantShardId,
    2179          418 :         generation: Generation,
    2180          418 :         shard_identity: ShardIdentity,
    2181          418 :         walredo_mgr: Option<Arc<super::WalRedoManager>>,
    2182          418 :         resources: TimelineResources,
    2183          418 :         pg_version: u32,
    2184          418 :         state: TimelineState,
    2185          418 :         attach_wal_lag_cooldown: Arc<OnceLock<WalLagCooldown>>,
    2186          418 :         create_idempotency: crate::tenant::CreateTimelineIdempotency,
    2187          418 :         cancel: CancellationToken,
    2188          418 :     ) -> Arc<Self> {
    2189          418 :         let disk_consistent_lsn = metadata.disk_consistent_lsn();
    2190          418 :         let (state, _) = watch::channel(state);
    2191          418 : 
    2192          418 :         let (layer_flush_start_tx, _) = tokio::sync::watch::channel((0, disk_consistent_lsn));
    2193          418 :         let (layer_flush_done_tx, _) = tokio::sync::watch::channel((0, Ok(())));
    2194          418 : 
    2195          418 :         let evictions_low_residence_duration_metric_threshold = {
    2196          418 :             let loaded_tenant_conf = tenant_conf.load();
    2197          418 :             Self::get_evictions_low_residence_duration_metric_threshold(
    2198          418 :                 &loaded_tenant_conf.tenant_conf,
    2199          418 :                 &conf.default_tenant_conf,
    2200          418 :             )
    2201              :         };
    2202              : 
    2203          418 :         if let Some(ancestor) = &ancestor {
    2204          230 :             let mut ancestor_gc_info = ancestor.gc_info.write().unwrap();
    2205          230 :             // If we construct an explicit timeline object, it's obviously not offloaded
    2206          230 :             let is_offloaded = MaybeOffloaded::No;
    2207          230 :             ancestor_gc_info.insert_child(timeline_id, metadata.ancestor_lsn(), is_offloaded);
    2208          230 :         }
    2209              : 
    2210          418 :         Arc::new_cyclic(|myself| {
    2211          418 :             let metrics = TimelineMetrics::new(
    2212          418 :                 &tenant_shard_id,
    2213          418 :                 &timeline_id,
    2214          418 :                 crate::metrics::EvictionsWithLowResidenceDurationBuilder::new(
    2215          418 :                     "mtime",
    2216          418 :                     evictions_low_residence_duration_metric_threshold,
    2217          418 :                 ),
    2218          418 :             );
    2219          418 :             let aux_file_metrics = metrics.aux_file_size_gauge.clone();
    2220              : 
    2221          418 :             let mut result = Timeline {
    2222          418 :                 conf,
    2223          418 :                 tenant_conf,
    2224          418 :                 myself: myself.clone(),
    2225          418 :                 timeline_id,
    2226          418 :                 tenant_shard_id,
    2227          418 :                 generation,
    2228          418 :                 shard_identity,
    2229          418 :                 pg_version,
    2230          418 :                 layers: Default::default(),
    2231          418 : 
    2232          418 :                 walredo_mgr,
    2233          418 :                 walreceiver: Mutex::new(None),
    2234          418 : 
    2235          418 :                 remote_client: Arc::new(resources.remote_client),
    2236          418 : 
    2237          418 :                 // initialize in-memory 'last_record_lsn' from 'disk_consistent_lsn'.
    2238          418 :                 last_record_lsn: SeqWait::new(RecordLsn {
    2239          418 :                     last: disk_consistent_lsn,
    2240          418 :                     prev: metadata.prev_record_lsn().unwrap_or(Lsn(0)),
    2241          418 :                 }),
    2242          418 :                 disk_consistent_lsn: AtomicLsn::new(disk_consistent_lsn.0),
    2243          418 : 
    2244          418 :                 last_freeze_at: AtomicLsn::new(disk_consistent_lsn.0),
    2245          418 :                 last_freeze_ts: RwLock::new(Instant::now()),
    2246          418 : 
    2247          418 :                 loaded_at: (disk_consistent_lsn, SystemTime::now()),
    2248          418 : 
    2249          418 :                 ancestor_timeline: ancestor,
    2250          418 :                 ancestor_lsn: metadata.ancestor_lsn(),
    2251          418 : 
    2252          418 :                 metrics,
    2253          418 : 
    2254          418 :                 query_metrics: crate::metrics::SmgrQueryTimePerTimeline::new(
    2255          418 :                     &tenant_shard_id,
    2256          418 :                     &timeline_id,
    2257          418 :                 ),
    2258          418 : 
    2259         2926 :                 directory_metrics: array::from_fn(|_| AtomicU64::new(0)),
    2260          418 : 
    2261          418 :                 flush_loop_state: Mutex::new(FlushLoopState::NotStarted),
    2262          418 : 
    2263          418 :                 layer_flush_start_tx,
    2264          418 :                 layer_flush_done_tx,
    2265          418 : 
    2266          418 :                 write_lock: tokio::sync::Mutex::new(None),
    2267          418 : 
    2268          418 :                 gc_info: std::sync::RwLock::new(GcInfo::default()),
    2269          418 : 
    2270          418 :                 latest_gc_cutoff_lsn: Rcu::new(metadata.latest_gc_cutoff_lsn()),
    2271          418 :                 initdb_lsn: metadata.initdb_lsn(),
    2272          418 : 
    2273          418 :                 current_logical_size: if disk_consistent_lsn.is_valid() {
    2274              :                     // we're creating timeline data with some layer files existing locally,
    2275              :                     // need to recalculate timeline's logical size based on data in the layers.
    2276          234 :                     LogicalSize::deferred_initial(disk_consistent_lsn)
    2277              :                 } else {
    2278              :                     // we're creating timeline data without any layers existing locally,
    2279              :                     // initial logical size is 0.
    2280          184 :                     LogicalSize::empty_initial()
    2281              :                 },
    2282          418 :                 partitioning: tokio::sync::Mutex::new((
    2283          418 :                     (KeyPartitioning::new(), KeyPartitioning::new().into_sparse()),
    2284          418 :                     Lsn(0),
    2285          418 :                 )),
    2286          418 :                 repartition_threshold: 0,
    2287          418 :                 last_image_layer_creation_check_at: AtomicLsn::new(0),
    2288          418 :                 last_image_layer_creation_check_instant: Mutex::new(None),
    2289          418 : 
    2290          418 :                 last_received_wal: Mutex::new(None),
    2291          418 :                 rel_size_cache: RwLock::new(RelSizeCache {
    2292          418 :                     complete_as_of: disk_consistent_lsn,
    2293          418 :                     map: HashMap::new(),
    2294          418 :                 }),
    2295          418 : 
    2296          418 :                 download_all_remote_layers_task_info: RwLock::new(None),
    2297          418 : 
    2298          418 :                 state,
    2299          418 : 
    2300          418 :                 eviction_task_timeline_state: tokio::sync::Mutex::new(
    2301          418 :                     EvictionTaskTimelineState::default(),
    2302          418 :                 ),
    2303          418 :                 delete_progress: TimelineDeleteProgress::default(),
    2304          418 : 
    2305          418 :                 cancel,
    2306          418 :                 gate: Gate::default(),
    2307          418 : 
    2308          418 :                 compaction_lock: tokio::sync::Mutex::default(),
    2309          418 :                 gc_lock: tokio::sync::Mutex::default(),
    2310          418 : 
    2311          418 :                 standby_horizon: AtomicLsn::new(0),
    2312          418 : 
    2313          418 :                 timeline_get_throttle: resources.timeline_get_throttle,
    2314          418 : 
    2315          418 :                 aux_file_size_estimator: AuxFileSizeEstimator::new(aux_file_metrics),
    2316          418 : 
    2317          418 :                 #[cfg(test)]
    2318          418 :                 extra_test_dense_keyspace: ArcSwap::new(Arc::new(KeySpace::default())),
    2319          418 : 
    2320          418 :                 l0_flush_global_state: resources.l0_flush_global_state,
    2321          418 : 
    2322          418 :                 handles: Default::default(),
    2323          418 : 
    2324          418 :                 attach_wal_lag_cooldown,
    2325          418 : 
    2326          418 :                 create_idempotency,
    2327          418 :             };
    2328          418 : 
    2329          418 :             result.repartition_threshold =
    2330          418 :                 result.get_checkpoint_distance() / REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE;
    2331          418 : 
    2332          418 :             result
    2333          418 :                 .metrics
    2334          418 :                 .last_record_gauge
    2335          418 :                 .set(disk_consistent_lsn.0 as i64);
    2336          418 :             result
    2337          418 :         })
    2338          418 :     }
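                       :     // Arc::new_cyclic (used above) lets Timeline hold a Weak reference to itself;
                       :     // a hedged, minimal sketch of the pattern in isolation:
                       :     //
                       :     //     struct Node { myself: std::sync::Weak<Node> }
                       :     //     let node = std::sync::Arc::new_cyclic(|weak| Node { myself: weak.clone() });
                       :     //     assert!(node.myself.upgrade().is_some());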
    2339              : 
    2340          588 :     pub(super) fn maybe_spawn_flush_loop(self: &Arc<Self>) {
    2341          588 :         let Ok(guard) = self.gate.enter() else {
    2342            0 :             info!("cannot start flush loop when the timeline gate has already been closed");
    2343            0 :             return;
    2344              :         };
    2345          588 :         let mut flush_loop_state = self.flush_loop_state.lock().unwrap();
    2346          588 :         match *flush_loop_state {
    2347          412 :             FlushLoopState::NotStarted => (),
    2348              :             FlushLoopState::Running { .. } => {
    2349          176 :                 info!(
    2350            0 :                     "skipping attempt to start flush_loop twice {}/{}",
    2351            0 :                     self.tenant_shard_id, self.timeline_id
    2352              :                 );
    2353          176 :                 return;
    2354              :             }
    2355              :             FlushLoopState::Exited => {
    2356            0 :                 warn!(
    2357            0 :                     "ignoring attempt to restart exited flush_loop {}/{}",
    2358            0 :                     self.tenant_shard_id, self.timeline_id
    2359              :                 );
    2360            0 :                 return;
    2361              :             }
    2362              :         }
    2363              : 
    2364          412 :         let layer_flush_start_rx = self.layer_flush_start_tx.subscribe();
    2365          412 :         let self_clone = Arc::clone(self);
    2366          412 : 
    2367          412 :         debug!("spawning flush loop");
    2368          412 :         *flush_loop_state = FlushLoopState::Running {
    2369          412 :             #[cfg(test)]
    2370          412 :             expect_initdb_optimization: false,
    2371          412 :             #[cfg(test)]
    2372          412 :             initdb_optimization_count: 0,
    2373          412 :         };
    2374          412 :         task_mgr::spawn(
    2375          412 :             task_mgr::BACKGROUND_RUNTIME.handle(),
    2376          412 :             task_mgr::TaskKind::LayerFlushTask,
    2377          412 :             self.tenant_shard_id,
    2378          412 :             Some(self.timeline_id),
    2379          412 :             "layer flush task",
    2380          412 :             async move {
    2381          412 :                 let _guard = guard;
    2382          412 :                 let background_ctx = RequestContext::todo_child(TaskKind::LayerFlushTask, DownloadBehavior::Error);
    2383        18044 :                 self_clone.flush_loop(layer_flush_start_rx, &background_ctx).await;
    2384           10 :                 let mut flush_loop_state = self_clone.flush_loop_state.lock().unwrap();
    2385           10 :                 assert!(matches!(*flush_loop_state, FlushLoopState::Running{..}));
    2386           10 :                 *flush_loop_state  = FlushLoopState::Exited;
    2387           10 :                 Ok(())
    2388           10 :             }
    2389          412 :             .instrument(info_span!(parent: None, "layer flush task", tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))
    2390              :         );
    2391          588 :     }
    2392              : 
    2393              :     /// Creates and starts the wal receiver.
    2394              :     ///
    2395              :     /// This function is expected to be called at most once per Timeline's lifecycle
    2396              :     /// when the timeline is activated.
    2397            0 :     fn launch_wal_receiver(
    2398            0 :         self: &Arc<Self>,
    2399            0 :         ctx: &RequestContext,
    2400            0 :         broker_client: BrokerClientChannel,
    2401            0 :     ) {
    2402            0 :         info!(
    2403            0 :             "launching WAL receiver for timeline {} of tenant {}",
    2404            0 :             self.timeline_id, self.tenant_shard_id
    2405              :         );
    2406              : 
    2407            0 :         let tenant_conf = self.tenant_conf.load();
    2408            0 :         let wal_connect_timeout = tenant_conf
    2409            0 :             .tenant_conf
    2410            0 :             .walreceiver_connect_timeout
    2411            0 :             .unwrap_or(self.conf.default_tenant_conf.walreceiver_connect_timeout);
    2412            0 :         let lagging_wal_timeout = tenant_conf
    2413            0 :             .tenant_conf
    2414            0 :             .lagging_wal_timeout
    2415            0 :             .unwrap_or(self.conf.default_tenant_conf.lagging_wal_timeout);
    2416            0 :         let max_lsn_wal_lag = tenant_conf
    2417            0 :             .tenant_conf
    2418            0 :             .max_lsn_wal_lag
    2419            0 :             .unwrap_or(self.conf.default_tenant_conf.max_lsn_wal_lag);
    2420            0 : 
    2421            0 :         let mut guard = self.walreceiver.lock().unwrap();
    2422            0 :         assert!(
    2423            0 :             guard.is_none(),
    2424            0 :             "multiple launches / re-launches of WAL receiver are not supported"
    2425              :         );
    2426            0 :         *guard = Some(WalReceiver::start(
    2427            0 :             Arc::clone(self),
    2428            0 :             WalReceiverConf {
    2429            0 :                 wal_connect_timeout,
    2430            0 :                 lagging_wal_timeout,
    2431            0 :                 max_lsn_wal_lag,
    2432            0 :                 auth_token: crate::config::SAFEKEEPER_AUTH_TOKEN.get().cloned(),
    2433            0 :                 availability_zone: self.conf.availability_zone.clone(),
    2434            0 :                 ingest_batch_size: self.conf.ingest_batch_size,
    2435            0 :             },
    2436            0 :             broker_client,
    2437            0 :             ctx,
    2438            0 :         ));
    2439            0 :     }
    2440              : 
    2441              :     /// Initialize with an empty layer map. Used when creating a new timeline.
    2442          412 :     pub(super) fn init_empty_layer_map(&self, start_lsn: Lsn) {
    2443          412 :         let mut layers = self.layers.try_write().expect(
    2444          412 :             "in the context where we call this function, no other task has access to the object",
    2445          412 :         );
    2446          412 :         layers
    2447          412 :             .open_mut()
    2448          412 :             .expect("in this context the LayerManager must still be open")
    2449          412 :             .initialize_empty(Lsn(start_lsn.0));
    2450          412 :     }
    2451              : 
    2452              :     /// Scan the timeline directory, clean up stray files, populate the layer map, and schedule uploads for local-only
    2453              :     /// files.
    2454            6 :     pub(super) async fn load_layer_map(
    2455            6 :         &self,
    2456            6 :         disk_consistent_lsn: Lsn,
    2457            6 :         index_part: IndexPart,
    2458            6 :     ) -> anyhow::Result<()> {
    2459              :         use init::{Decision::*, Discovered, DismissedLayer};
    2460              :         use LayerName::*;
    2461              : 
    2462            6 :         let mut guard = self.layers.write().await;
    2463              : 
    2464            6 :         let timer = self.metrics.load_layer_map_histo.start_timer();
    2465            6 : 
    2466            6 :         // Scan the timeline directory and create ImageLayerName and DeltaLayerName
    2467            6 :         // structs representing all files on disk
    2468            6 :         let timeline_path = self
    2469            6 :             .conf
    2470            6 :             .timeline_path(&self.tenant_shard_id, &self.timeline_id);
    2471            6 :         let conf = self.conf;
    2472            6 :         let span = tracing::Span::current();
    2473            6 : 
    2474            6 :         // Copy to move into the task we're about to spawn
    2475            6 :         let this = self.myself.upgrade().expect("&self method holds the arc");
    2476              : 
    2477            6 :         let (loaded_layers, needs_cleanup, total_physical_size) = tokio::task::spawn_blocking({
    2478            6 :             move || {
    2479            6 :                 let _g = span.entered();
    2480            6 :                 let discovered = init::scan_timeline_dir(&timeline_path)?;
    2481            6 :                 let mut discovered_layers = Vec::with_capacity(discovered.len());
    2482            6 :                 let mut unrecognized_files = Vec::new();
    2483            6 : 
    2484            6 :                 let mut path = timeline_path;
    2485              : 
    2486           22 :                 for discovered in discovered {
    2487           16 :                     let (name, kind) = match discovered {
    2488           16 :                         Discovered::Layer(layer_file_name, local_metadata) => {
    2489           16 :                             discovered_layers.push((layer_file_name, local_metadata));
    2490           16 :                             continue;
    2491              :                         }
    2492            0 :                         Discovered::IgnoredBackup(path) => {
    2493            0 :                             std::fs::remove_file(path)
    2494            0 :                                 .or_else(fs_ext::ignore_not_found)
    2495            0 :                                 .fatal_err("Removing .old file");
    2496            0 :                             continue;
    2497              :                         }
    2498            0 :                         Discovered::Unknown(file_name) => {
    2499            0 :                             // we will later error if there are any
    2500            0 :                             unrecognized_files.push(file_name);
    2501            0 :                             continue;
    2502              :                         }
    2503            0 :                         Discovered::Ephemeral(name) => (name, "old ephemeral file"),
    2504            0 :                         Discovered::Temporary(name) => (name, "temporary timeline file"),
    2505            0 :                         Discovered::TemporaryDownload(name) => (name, "temporary download"),
    2506              :                     };
    2507            0 :                     path.push(Utf8Path::new(&name));
    2508            0 :                     init::cleanup(&path, kind)?;
    2509            0 :                     path.pop();
    2510              :                 }
    2511              : 
    2512            6 :                 if !unrecognized_files.is_empty() {
    2513              :                     // assume that if there are any, there are many.
    2514            0 :                     let n = unrecognized_files.len();
    2515            0 :                     let first = &unrecognized_files[..n.min(10)];
    2516            0 :                     anyhow::bail!(
    2517            0 :                         "unrecognized files in timeline dir (total {n}), first 10: {first:?}"
    2518            0 :                     );
    2519            6 :                 }
    2520            6 : 
    2521            6 :                 let decided = init::reconcile(discovered_layers, &index_part, disk_consistent_lsn);
    2522            6 : 
    2523            6 :                 let mut loaded_layers = Vec::new();
    2524            6 :                 let mut needs_cleanup = Vec::new();
    2525            6 :                 let mut total_physical_size = 0;
    2526              : 
    2527           22 :                 for (name, decision) in decided {
    2528           16 :                     let decision = match decision {
    2529           16 :                         Ok(decision) => decision,
    2530            0 :                         Err(DismissedLayer::Future { local }) => {
    2531            0 :                             if let Some(local) = local {
    2532            0 :                                 init::cleanup_future_layer(
    2533            0 :                                     &local.local_path,
    2534            0 :                                     &name,
    2535            0 :                                     disk_consistent_lsn,
    2536            0 :                                 )?;
    2537            0 :                             }
    2538            0 :                             needs_cleanup.push(name);
    2539            0 :                             continue;
    2540              :                         }
    2541            0 :                         Err(DismissedLayer::LocalOnly(local)) => {
    2542            0 :                             init::cleanup_local_only_file(&name, &local)?;
    2543              :                             // this file never existed remotely; we will have to redo the work
    2544            0 :                             continue;
    2545              :                         }
    2546            0 :                         Err(DismissedLayer::BadMetadata(local)) => {
    2547            0 :                             init::cleanup_local_file_for_remote(&local)?;
    2548              :                             // this file never existed remotely; we will have to redo the work
    2549            0 :                             continue;
    2550              :                         }
    2551              :                     };
    2552              : 
    2553           16 :                     match &name {
    2554           12 :                         Delta(d) => assert!(d.lsn_range.end <= disk_consistent_lsn + 1),
    2555            4 :                         Image(i) => assert!(i.lsn <= disk_consistent_lsn),
    2556              :                     }
    2557              : 
    2558           16 :                     tracing::debug!(layer=%name, ?decision, "applied");
    2559              : 
    2560           16 :                     let layer = match decision {
    2561           16 :                         Resident { local, remote } => {
    2562           16 :                             total_physical_size += local.file_size;
    2563           16 :                             Layer::for_resident(conf, &this, local.local_path, name, remote)
    2564           16 :                                 .drop_eviction_guard()
    2565              :                         }
    2566            0 :                         Evicted(remote) => Layer::for_evicted(conf, &this, name, remote),
    2567              :                     };
    2568              : 
    2569           16 :                     loaded_layers.push(layer);
    2570              :                 }
    2571            6 :                 Ok((loaded_layers, needs_cleanup, total_physical_size))
    2572            6 :             }
    2573            6 :         })
    2574            3 :         .await
    2575            6 :         .map_err(anyhow::Error::new)
    2576            6 :         .and_then(|x| x)?;
    2577              : 
    2578            6 :         let num_layers = loaded_layers.len();
    2579            6 : 
    2580            6 :         guard
    2581            6 :             .open_mut()
    2582            6 :             .expect("layermanager must be open during init")
    2583            6 :             .initialize_local_layers(loaded_layers, disk_consistent_lsn + 1);
    2584            6 : 
    2585            6 :         self.remote_client
    2586            6 :             .schedule_layer_file_deletion(&needs_cleanup)?;
    2587            6 :         self.remote_client
    2588            6 :             .schedule_index_upload_for_file_changes()?;
    2589              :         // This barrier orders the DELETEs above before any later operations.
    2590              :         // This is critical because code executing after the barrier might
    2591              :         // create again objects with the same key that we just scheduled for deletion.
    2592              :         // For example, if we just scheduled deletion of an image layer "from the future",
    2593              :         // later compaction might run again and re-create the same image layer.
    2594              :         // "from the future" here means an image layer whose LSN is > IndexPart::disk_consistent_lsn.
    2595              :         // "same" here means same key range and LSN.
    2596              :         //
    2597              :         // Without a barrier between above DELETEs and the re-creation's PUTs,
    2598              :         // the upload queue may execute the PUT first, then the DELETE.
    2599              :         // In our example, we will end up with an IndexPart referencing a non-existent object.
    2600              :         //
    2601              :         // 1. a future image layer is created and uploaded
    2602              :         // 2. ps restart
    2603              :         // 3. the future layer from (1) is deleted during load layer map
    2604              :         // 4. image layer is re-created and uploaded
    2605              :         // 5. deletion queue would like to delete (1) but actually deletes (4)
    2606              :         // 6. delete by name works as expected, but it now deletes the wrong (later) version
    2607              :         //
    2608              :         // See https://github.com/neondatabase/neon/issues/5878
    2609              :         //
    2610              :         // NB: generation numbers naturally protect against this because they disambiguate
    2611              :         //     (1) and (4)
    2612            6 :         self.remote_client.schedule_barrier()?;
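                       :         // A hedged sketch of the ordering the barrier enforces (pseudo-ops, not the
                       :         // real upload queue API):
                       :         //
                       :         //     queue: [ Delete("future-image-layer"),  // scheduled above
                       :         //              Barrier,                        // later ops cannot overtake this point
                       :         //              Put("future-image-layer") ]     // e.g. re-created by later compaction
                       :         //
                       :         // Without the Barrier, the Put could execute before the Delete, leaving the
                       :         // index referencing an object that was subsequently deleted.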
    2613              :         // Tenant::create_timeline will wait for these uploads to happen before returning, or
    2614              :         // on retry.
    2615              : 
    2616              :         // Now that we have the full layer map, we may calculate the visibility of layers within it (a global scan)
    2617            6 :         drop(guard); // drop write lock, update_layer_visibility will take a read lock.
    2618            6 :         self.update_layer_visibility().await?;
    2619              : 
    2620            6 :         info!(
    2621            0 :             "loaded layer map with {} layers at {}, total physical size: {}",
    2622              :             num_layers, disk_consistent_lsn, total_physical_size
    2623              :         );
    2624              : 
    2625            6 :         timer.stop_and_record();
    2626            6 :         Ok(())
    2627            6 :     }
    2628              : 
    2629              :     /// Retrieve current logical size of the timeline.
    2630              :     ///
    2631              :     /// The size may lag behind the actual number if the initial size calculation
    2632              :     /// has not been run yet (it is triggered on the first size access).
    2633              :     ///
    2634              :     /// Returns the size and a boolean flag that shows whether the size is exact.
    2635            0 :     pub(crate) fn get_current_logical_size(
    2636            0 :         self: &Arc<Self>,
    2637            0 :         priority: GetLogicalSizePriority,
    2638            0 :         ctx: &RequestContext,
    2639            0 :     ) -> logical_size::CurrentLogicalSize {
    2640            0 :         if !self.tenant_shard_id.is_shard_zero() {
    2641              :             // Logical size is only accurately maintained on shard zero: when called elsewhere,
    2642              :             // for example when the HTTP API serves a timeline GET on a non-zero shard, return zero
    2643            0 :             return logical_size::CurrentLogicalSize::Approximate(logical_size::Approximate::zero());
    2644            0 :         }
    2645            0 : 
    2646            0 :         let current_size = self.current_logical_size.current_size();
    2647            0 :         debug!("Current size: {current_size:?}");
    2648              : 
    2649            0 :         match (current_size.accuracy(), priority) {
    2650            0 :             (logical_size::Accuracy::Exact, _) => (), // nothing to do
    2651            0 :             (logical_size::Accuracy::Approximate, GetLogicalSizePriority::Background) => {
    2652            0 :                 // background task will eventually deliver an exact value, we're in no rush
    2653            0 :             }
    2654              :             (logical_size::Accuracy::Approximate, GetLogicalSizePriority::User) => {
    2655              :                 // background task is not ready, but user is asking for it now;
    2656              :                 // => make the background task skip the line
    2657              :                 // (The alternative would be to calculate the size here, but
    2658              :                 //  it can actually take a long time if the user has a lot of rels.
    2659              :                 //  And we'll inevitably need it again, so let the background task do the work.)
    2660            0 :                 match self
    2661            0 :                     .current_logical_size
    2662            0 :                     .cancel_wait_for_background_loop_concurrency_limit_semaphore
    2663            0 :                     .get()
    2664              :                 {
    2665            0 :                     Some(cancel) => cancel.cancel(),
    2666              :                     None => {
    2667            0 :                         let state = self.current_state();
    2668            0 :                         if matches!(
    2669            0 :                             state,
    2670              :                             TimelineState::Broken { .. } | TimelineState::Stopping
    2671            0 :                         ) {
    2672            0 : 
    2673            0 :                             // Can happen when the timeline detail endpoint is used while deletion is ongoing (or the timeline is broken).
    2674            0 :                             // Don't make noise.
    2675            0 :                         } else {
    2676            0 :                             warn!("unexpected: cancel_wait_for_background_loop_concurrency_limit_semaphore not set, priority-boosting of logical size calculation will not work");
    2677            0 :                             debug_assert!(false);
    2678              :                         }
    2679              :                     }
    2680              :                 };
    2681              :             }
    2682              :         }
    2683              : 
    2684            0 :         if let CurrentLogicalSize::Approximate(_) = &current_size {
    2685            0 :             if ctx.task_kind() == TaskKind::WalReceiverConnectionHandler {
    2686            0 :                 let first = self
    2687            0 :                     .current_logical_size
    2688            0 :                     .did_return_approximate_to_walreceiver
    2689            0 :                     .compare_exchange(
    2690            0 :                         false,
    2691            0 :                         true,
    2692            0 :                         AtomicOrdering::Relaxed,
    2693            0 :                         AtomicOrdering::Relaxed,
    2694            0 :                     )
    2695            0 :                     .is_ok();
    2696            0 :                 if first {
    2697            0 :                     crate::metrics::initial_logical_size::TIMELINES_WHERE_WALRECEIVER_GOT_APPROXIMATE_SIZE.inc();
    2698            0 :                 }
    2699            0 :             }
    2700            0 :         }
    2701              : 
    2702            0 :         current_size
    2703            0 :     }
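                      :
                      :     // A minimal sketch of the report-once latch used above, assuming only
                      :     // std's atomics: `compare_exchange` from false to true succeeds for
                      :     // exactly one caller, so only the first approximate answer handed to
                      :     // the walreceiver bumps the metric. `latch_first` is a hypothetical
                      :     // helper, not used elsewhere in this file.
                      :     #[allow(dead_code)]
                      :     fn latch_first(flag: &std::sync::atomic::AtomicBool) -> bool {
                      :         flag.compare_exchange(false, true, AtomicOrdering::Relaxed, AtomicOrdering::Relaxed)
                      :             .is_ok()
                      :     }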
    2704              : 
    2705            0 :     fn spawn_initial_logical_size_computation_task(self: &Arc<Self>, ctx: &RequestContext) {
    2706            0 :         let Some(initial_part_end) = self.current_logical_size.initial_part_end else {
    2707              :             // nothing to do for freshly created timelines;
    2708            0 :             assert_eq!(
    2709            0 :                 self.current_logical_size.current_size().accuracy(),
    2710            0 :                 logical_size::Accuracy::Exact,
    2711            0 :             );
    2712            0 :             self.current_logical_size.initialized.add_permits(1);
    2713            0 :             return;
    2714              :         };
    2715              : 
    2716            0 :         let cancel_wait_for_background_loop_concurrency_limit_semaphore = CancellationToken::new();
    2717            0 :         let token = cancel_wait_for_background_loop_concurrency_limit_semaphore.clone();
    2718            0 :         self.current_logical_size
    2719            0 :             .cancel_wait_for_background_loop_concurrency_limit_semaphore.set(token)
    2720            0 :             .expect("initial logical size calculation task must be spawned exactly once per Timeline object");
    2721            0 : 
    2722            0 :         let self_clone = Arc::clone(self);
    2723            0 :         let background_ctx = ctx.detached_child(
    2724            0 :             TaskKind::InitialLogicalSizeCalculation,
    2725            0 :             DownloadBehavior::Download,
    2726            0 :         );
    2727            0 :         task_mgr::spawn(
    2728            0 :             task_mgr::BACKGROUND_RUNTIME.handle(),
    2729            0 :             task_mgr::TaskKind::InitialLogicalSizeCalculation,
    2730            0 :             self.tenant_shard_id,
    2731            0 :             Some(self.timeline_id),
    2732            0 :             "initial size calculation",
    2733              :             // NB: don't log errors here, task_mgr will do that.
    2734            0 :             async move {
    2735            0 :                 let cancel = task_mgr::shutdown_token();
    2736            0 :                 self_clone
    2737            0 :                     .initial_logical_size_calculation_task(
    2738            0 :                         initial_part_end,
    2739            0 :                         cancel_wait_for_background_loop_concurrency_limit_semaphore,
    2740            0 :                         cancel,
    2741            0 :                         background_ctx,
    2742            0 :                     )
    2743            0 :                     .await;
    2744            0 :                 Ok(())
    2745            0 :             }
    2746            0 :             .instrument(info_span!(parent: None, "initial_size_calculation", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id)),
    2747              :         );
    2748            0 :     }
    2749              : 
    2750            0 :     async fn initial_logical_size_calculation_task(
    2751            0 :         self: Arc<Self>,
    2752            0 :         initial_part_end: Lsn,
    2753            0 :         skip_concurrency_limiter: CancellationToken,
    2754            0 :         cancel: CancellationToken,
    2755            0 :         background_ctx: RequestContext,
    2756            0 :     ) {
    2757            0 :         scopeguard::defer! {
    2758            0 :             // Irrespective of the outcome of this operation, we should unblock anyone waiting for it.
    2759            0 :             self.current_logical_size.initialized.add_permits(1);
    2760            0 :         }
    2761            0 : 
    2762            0 :         let try_once = |attempt: usize| {
    2763            0 :             let background_ctx = &background_ctx;
    2764            0 :             let self_ref = &self;
    2765            0 :             let skip_concurrency_limiter = &skip_concurrency_limiter;
    2766            0 :             async move {
    2767            0 :                 let cancel = task_mgr::shutdown_token();
    2768            0 :                 let wait_for_permit = super::tasks::concurrent_background_tasks_rate_limit_permit(
    2769            0 :                     BackgroundLoopKind::InitialLogicalSizeCalculation,
    2770            0 :                     background_ctx,
    2771            0 :                 );
    2772              : 
    2773              :                 use crate::metrics::initial_logical_size::StartCircumstances;
    2774            0 :                 let (_maybe_permit, circumstances) = tokio::select! {
    2775            0 :                     permit = wait_for_permit => {
    2776            0 :                         (Some(permit), StartCircumstances::AfterBackgroundTasksRateLimit)
    2777              :                     }
    2778            0 :                     _ = self_ref.cancel.cancelled() => {
    2779            0 :                         return Err(CalculateLogicalSizeError::Cancelled);
    2780              :                     }
    2781            0 :                     _ = cancel.cancelled() => {
    2782            0 :                         return Err(CalculateLogicalSizeError::Cancelled);
    2783              :                     },
    2784            0 :                     () = skip_concurrency_limiter.cancelled() => {
    2785              :                         // Some action that is part of an end user interaction requested the logical size
    2786              :                         // => break out of the rate limit
    2787              :                         // TODO: ideally we'd not run on BackgroundRuntime but the requester's runtime;
    2788              :                         // but then again what happens if they cancel; also, we should just be using
    2789              :                         // one runtime across the entire process, so let's leave this for now.
    2790            0 :                         (None, StartCircumstances::SkippedConcurrencyLimiter)
    2791              :                     }
    2792              :                 };
    2793              : 
    2794            0 :                 let metrics_guard = if attempt == 1 {
    2795            0 :                     crate::metrics::initial_logical_size::START_CALCULATION.first(circumstances)
    2796              :                 } else {
    2797            0 :                     crate::metrics::initial_logical_size::START_CALCULATION.retry(circumstances)
    2798              :                 };
    2799              : 
    2800            0 :                 let calculated_size = self_ref
    2801            0 :                     .logical_size_calculation_task(
    2802            0 :                         initial_part_end,
    2803            0 :                         LogicalSizeCalculationCause::Initial,
    2804            0 :                         background_ctx,
    2805            0 :                     )
    2806            0 :                     .await?;
    2807              : 
    2808            0 :                 self_ref
    2809            0 :                     .trigger_aux_file_size_computation(initial_part_end, background_ctx)
    2810            0 :                     .await?;
    2811              : 
    2812              :                 // TODO: add aux file size to logical size
    2813              : 
    2814            0 :                 Ok((calculated_size, metrics_guard))
    2815            0 :             }
    2816            0 :         };
    2817              : 
    2818            0 :         let retrying = async {
    2819            0 :             let mut attempt = 0;
    2820              :             loop {
    2821            0 :                 attempt += 1;
    2822            0 : 
    2823            0 :                 match try_once(attempt).await {
    2824            0 :                     Ok(res) => return ControlFlow::Continue(res),
    2825            0 :                     Err(CalculateLogicalSizeError::Cancelled) => return ControlFlow::Break(()),
    2826              :                     Err(
    2827            0 :                         e @ (CalculateLogicalSizeError::Decode(_)
    2828            0 :                         | CalculateLogicalSizeError::PageRead(_)),
    2829            0 :                     ) => {
    2830            0 :                         warn!(attempt, "initial size calculation failed: {e:?}");
    2831              :                         // exponential back-off doesn't make sense at these long intervals;
    2832              :                         // use fixed retry interval with generous jitter instead
    2833            0 :                         let sleep_duration = Duration::from_secs(
    2834            0 :                             u64::try_from(
    2835            0 :                                 // 1hour base
    2836            0 :                                 (60_i64 * 60_i64)
    2837            0 :                                     // 10min jitter
    2838            0 :                                     + rand::thread_rng().gen_range(-10 * 60..10 * 60),
    2839            0 :                             )
    2840            0 :                             .expect("10min < 1hour"),
    2841            0 :                         );
    2842            0 :                         tokio::time::sleep(sleep_duration).await;
    2843              :                     }
    2844              :                 }
    2845              :             }
    2846            0 :         };
    2847              : 
    2848            0 :         let (calculated_size, metrics_guard) = tokio::select! {
    2849            0 :             res = retrying  => {
    2850            0 :                 match res {
    2851            0 :                     ControlFlow::Continue(calculated_size) => calculated_size,
    2852            0 :                     ControlFlow::Break(()) => return,
    2853              :                 }
    2854              :             }
    2855            0 :             _ = cancel.cancelled() => {
    2856            0 :                 return;
    2857              :             }
    2858              :         };
    2859              : 
    2860              :         // We cannot query current_logical_size.current_size() to learn the current
    2861              :         // *negative* value; it is only exposed truncated to u64.
    2862            0 :         let added = self
    2863            0 :             .current_logical_size
    2864            0 :             .size_added_after_initial
    2865            0 :             .load(AtomicOrdering::Relaxed);
    2866            0 : 
    2867            0 :         let sum = calculated_size.saturating_add_signed(added);
    2868            0 : 
    2869            0 :         // set the gauge value before it can be set in `update_current_logical_size`.
    2870            0 :         self.metrics.current_logical_size_gauge.set(sum);
    2871            0 : 
    2872            0 :         self.current_logical_size
    2873            0 :             .initial_logical_size
    2874            0 :             .set((calculated_size, metrics_guard.calculation_result_saved()))
    2875            0 :             .ok()
    2876            0 :             .expect("only this task sets it");
    2877            0 :     }
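                      :
                      :     // The retry delay above, factored out as an illustrative sketch: a fixed
                      :     // one-hour base with +/- ten minutes of uniform jitter, built with the
                      :     // `rand` crate this file already imports. `initial_size_retry_delay` is
                      :     // a hypothetical helper, not called by the task itself.
                      :     #[allow(dead_code)]
                      :     fn initial_size_retry_delay() -> Duration {
                      :         let jitter = rand::thread_rng().gen_range(-10 * 60..10 * 60_i64);
                      :         Duration::from_secs(u64::try_from(60 * 60 + jitter).expect("10min < 1hour"))
                      :     }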
    2878              : 
    2879            0 :     pub(crate) fn spawn_ondemand_logical_size_calculation(
    2880            0 :         self: &Arc<Self>,
    2881            0 :         lsn: Lsn,
    2882            0 :         cause: LogicalSizeCalculationCause,
    2883            0 :         ctx: RequestContext,
    2884            0 :     ) -> oneshot::Receiver<Result<u64, CalculateLogicalSizeError>> {
    2885            0 :         let (sender, receiver) = oneshot::channel();
    2886            0 :         let self_clone = Arc::clone(self);
    2887            0 :         // XXX if our caller loses interest, i.e., ctx is cancelled,
    2888            0 :         // we should stop the size calculation work and return an error.
    2889            0 :         // That would require restructuring this function's API to
    2890            0 :         // return the result directly, instead of a Receiver for the result.
    2891            0 :         let ctx = ctx.detached_child(
    2892            0 :             TaskKind::OndemandLogicalSizeCalculation,
    2893            0 :             DownloadBehavior::Download,
    2894            0 :         );
    2895            0 :         task_mgr::spawn(
    2896            0 :             task_mgr::BACKGROUND_RUNTIME.handle(),
    2897            0 :             task_mgr::TaskKind::OndemandLogicalSizeCalculation,
    2898            0 :             self.tenant_shard_id,
    2899            0 :             Some(self.timeline_id),
    2900            0 :             "ondemand logical size calculation",
    2901            0 :             async move {
    2902            0 :                 let res = self_clone
    2903            0 :                     .logical_size_calculation_task(lsn, cause, &ctx)
    2904            0 :                     .await;
    2905            0 :                 let _ = sender.send(res).ok();
    2906            0 :                 Ok(()) // Receiver is responsible for handling errors
    2907            0 :             }
    2908            0 :             .in_current_span(),
    2909            0 :         );
    2910            0 :         receiver
    2911            0 :     }
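                      :
                      :     // A hypothetical call site for the function above, shown as a sketch:
                      :     // await the oneshot receiver and flatten its two failure layers (a failed
                      :     // calculation, or the task dropping the sender). `ondemand_size_example`
                      :     // and the cause value are illustrative choices, not pageserver code paths.
                      :     #[allow(dead_code)]
                      :     async fn ondemand_size_example(
                      :         self: &Arc<Self>,
                      :         lsn: Lsn,
                      :         ctx: RequestContext,
                      :     ) -> Option<u64> {
                      :         let rx = self.spawn_ondemand_logical_size_calculation(
                      :             lsn,
                      :             LogicalSizeCalculationCause::TenantSizeHandler,
                      :             ctx,
                      :         );
                      :         match rx.await {
                      :             Ok(Ok(size)) => Some(size),
                      :             Ok(Err(_)) | Err(_) => None,
                      :         }
                      :     }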
    2912              : 
    2913              :     /// # Cancel-Safety
    2914              :     ///
    2915              :     /// This method is cancellation-safe.
    2916            0 :     #[instrument(skip_all)]
    2917              :     async fn logical_size_calculation_task(
    2918              :         self: &Arc<Self>,
    2919              :         lsn: Lsn,
    2920              :         cause: LogicalSizeCalculationCause,
    2921              :         ctx: &RequestContext,
    2922              :     ) -> Result<u64, CalculateLogicalSizeError> {
    2923              :         crate::span::debug_assert_current_span_has_tenant_and_timeline_id();
    2924              :         // We should never be calculating logical sizes on shard !=0, because these shards do not have
    2925              :         // accurate relation sizes, and they do not emit consumption metrics.
    2926              :         debug_assert!(self.tenant_shard_id.is_shard_zero());
    2927              : 
    2928              :         let guard = self
    2929              :             .gate
    2930              :             .enter()
    2931            0 :             .map_err(|_| CalculateLogicalSizeError::Cancelled)?;
    2932              : 
    2933              :         let self_calculation = Arc::clone(self);
    2934              : 
    2935            0 :         let mut calculation = pin!(async {
    2936            0 :             let ctx = ctx.attached_child();
    2937            0 :             self_calculation
    2938            0 :                 .calculate_logical_size(lsn, cause, &guard, &ctx)
    2939            0 :                 .await
    2940            0 :         });
    2941              : 
    2942              :         tokio::select! {
    2943              :             res = &mut calculation => { res }
    2944              :             _ = self.cancel.cancelled() => {
    2945              :                 debug!("cancelling logical size calculation for timeline shutdown");
    2946              :                 calculation.await
    2947              :             }
    2948              :         }
    2949              :     }
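                      :
                      :     // The shutdown handling above in isolation: race a pinned future against
                      :     // a CancellationToken, but keep driving the future to completion on
                      :     // cancellation, which is what makes the pattern cancellation-safe for our
                      :     // caller. `run_despite_cancel` is an illustrative sketch; `work` stands in
                      :     // for the real calculation future.
                      :     #[allow(dead_code)]
                      :     async fn run_despite_cancel<F: std::future::Future>(
                      :         work: F,
                      :         cancel: &CancellationToken,
                      :     ) -> F::Output {
                      :         let mut work = std::pin::pin!(work);
                      :         tokio::select! {
                      :             res = &mut work => res,
                      :             _ = cancel.cancelled() => work.await,
                      :         }
                      :     }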
    2950              : 
    2951              :     /// Calculate the logical size of the database at the given LSN.
    2952              :     ///
    2953              :     /// NOTE: counted incrementally, includes ancestors. This can be a slow operation,
    2954              :     /// especially if we need to download remote layers.
    2955              :     ///
    2956              :     /// # Cancel-Safety
    2957              :     ///
    2958              :     /// This method is cancellation-safe.
    2959            0 :     async fn calculate_logical_size(
    2960            0 :         &self,
    2961            0 :         up_to_lsn: Lsn,
    2962            0 :         cause: LogicalSizeCalculationCause,
    2963            0 :         _guard: &GateGuard,
    2964            0 :         ctx: &RequestContext,
    2965            0 :     ) -> Result<u64, CalculateLogicalSizeError> {
    2966            0 :         info!(
    2967            0 :             "Calculating logical size for timeline {} at {}",
    2968              :             self.timeline_id, up_to_lsn
    2969              :         );
    2970              : 
    2971            0 :         pausable_failpoint!("timeline-calculate-logical-size-pause");
    2972              : 
    2973              :         // See if we've already done the work for initial size calculation.
    2974              :         // This is a short-cut for timelines that are mostly unused.
    2975            0 :         if let Some(size) = self.current_logical_size.initialized_size(up_to_lsn) {
    2976            0 :             return Ok(size);
    2977            0 :         }
    2978            0 :         let storage_time_metrics = match cause {
    2979              :             LogicalSizeCalculationCause::Initial
    2980              :             | LogicalSizeCalculationCause::ConsumptionMetricsSyntheticSize
    2981            0 :             | LogicalSizeCalculationCause::TenantSizeHandler => &self.metrics.logical_size_histo,
    2982              :             LogicalSizeCalculationCause::EvictionTaskImitation => {
    2983            0 :                 &self.metrics.imitate_logical_size_histo
    2984              :             }
    2985              :         };
    2986            0 :         let timer = storage_time_metrics.start_timer();
    2987            0 :         let logical_size = self
    2988            0 :             .get_current_logical_size_non_incremental(up_to_lsn, ctx)
    2989            0 :             .await?;
    2990            0 :         debug!("calculated logical size: {logical_size}");
    2991            0 :         timer.stop_and_record();
    2992            0 :         Ok(logical_size)
    2993            0 :     }
    2994              : 
    2995              :     /// Update current logical size, adding `delta` to the old value.
    2996       270570 :     fn update_current_logical_size(&self, delta: i64) {
    2997       270570 :         let logical_size = &self.current_logical_size;
    2998       270570 :         logical_size.increment_size(delta);
    2999       270570 : 
    3000       270570 :         // Also set the value in the prometheus gauge. Note that
    3001       270570 :         // there is a race condition here: if this is called by two
    3002       270570 :         // threads concurrently, the prometheus gauge might be set to
    3003       270570 :         // one value while current_logical_size is set to the
    3004       270570 :         // other.
    3005       270570 :         match logical_size.current_size() {
    3006       270570 :             CurrentLogicalSize::Exact(ref new_current_size) => self
    3007       270570 :                 .metrics
    3008       270570 :                 .current_logical_size_gauge
    3009       270570 :                 .set(new_current_size.into()),
    3010            0 :             CurrentLogicalSize::Approximate(_) => {
    3011            0 :                 // don't update the gauge yet, this allows us not to update the gauge back and
    3012            0 :                 // forth between the initial size calculation task.
    3013            0 :             }
    3014              :         }
    3015       270570 :     }
    3016              : 
    3017         2834 :     pub(crate) fn update_directory_entries_count(&self, kind: DirectoryKind, count: u64) {
    3018         2834 :         self.directory_metrics[kind.offset()].store(count, AtomicOrdering::Relaxed);
    3019         2834 :         let aux_metric =
    3020         2834 :             self.directory_metrics[DirectoryKind::AuxFiles.offset()].load(AtomicOrdering::Relaxed);
    3021         2834 : 
    3022         2834 :         let sum_of_entries = self
    3023         2834 :             .directory_metrics
    3024         2834 :             .iter()
    3025        19838 :             .map(|v| v.load(AtomicOrdering::Relaxed))
    3026         2834 :             .sum();
    3027              :         // Set a high general threshold and a lower threshold for the auxiliary files,
    3028              :         // as we can have large numbers of relations in the db directory.
    3029              :         const SUM_THRESHOLD: u64 = 5000;
    3030              :         const AUX_THRESHOLD: u64 = 1000;
    3031         2834 :         if sum_of_entries >= SUM_THRESHOLD || aux_metric >= AUX_THRESHOLD {
    3032            0 :             self.metrics
    3033            0 :                 .directory_entries_count_gauge
    3034            0 :                 .set(sum_of_entries);
    3035         2834 :         } else if let Some(metric) = Lazy::get(&self.metrics.directory_entries_count_gauge) {
    3036            0 :             metric.set(sum_of_entries);
    3037         2834 :         }
    3038         2834 :     }
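                      :
                      :     // The `Lazy::get` dance above in miniature: write through a lazily
                      :     // initialized metric only if something already forced its creation, so
                      :     // below-threshold callers never pay for registering the gauge. A sketch
                      :     // over a plain AtomicU64 rather than a prometheus gauge; `set_if_initialized`
                      :     // is a hypothetical helper.
                      :     #[allow(dead_code)]
                      :     fn set_if_initialized(metric: &Lazy<AtomicU64>, value: u64) {
                      :         if let Some(m) = Lazy::get(metric) {
                      :             m.store(value, AtomicOrdering::Relaxed);
                      :         }
                      :     }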
    3039              : 
    3040            0 :     async fn find_layer(
    3041            0 :         &self,
    3042            0 :         layer_name: &LayerName,
    3043            0 :     ) -> Result<Option<Layer>, layer_manager::Shutdown> {
    3044            0 :         let guard = self.layers.read().await;
    3045            0 :         let layer = guard
    3046            0 :             .layer_map()?
    3047            0 :             .iter_historic_layers()
    3048            0 :             .find(|l| &l.layer_name() == layer_name)
    3049            0 :             .map(|found| guard.get_from_desc(&found));
    3050            0 :         Ok(layer)
    3051            0 :     }
    3052              : 
    3053              :     /// The timeline heatmap is a hint to secondary locations from the primary location,
    3054              :     /// indicating which layers are currently on-disk on the primary.
    3055              :     ///
    3056              :     /// None is returned if the Timeline is in a state where uploading a heatmap
    3057              :     /// doesn't make sense, such as shutting down or initializing.  The caller
    3058              :     /// should treat this as a cue to simply skip doing any heatmap uploading
    3059              :     /// for this timeline.
    3060            2 :     pub(crate) async fn generate_heatmap(&self) -> Option<HeatMapTimeline> {
    3061            2 :         if !self.is_active() {
    3062            0 :             return None;
    3063            2 :         }
    3064              : 
    3065            2 :         let guard = self.layers.read().await;
    3066              : 
    3067           10 :         let resident = guard.likely_resident_layers().filter_map(|layer| {
    3068           10 :             match layer.visibility() {
    3069              :                 LayerVisibilityHint::Visible => {
    3070              :                     // Layer is visible to one or more read LSNs: eligible for inclusion in the heatmap
    3071            8 :                     let last_activity_ts = layer.latest_activity();
    3072            8 :                     Some((layer.layer_desc(), layer.metadata(), last_activity_ts))
    3073              :                 }
    3074              :                 LayerVisibilityHint::Covered => {
    3075              :                     // Layer is resident but unlikely to be read: not eligible for inclusion in the heatmap.
    3076            2 :                     None
    3077              :                 }
    3078              :             }
    3079           10 :         });
    3080            2 : 
    3081            2 :         let mut layers = resident.collect::<Vec<_>>();
    3082            2 : 
    3083            2 :         // Sort layers in order of which to download first.  For a large set of layers to download, we
    3084            2 :         // want to prioritize those layers which are most likely to still be resident many minutes
    3085            2 :         // or hours later:
    3086            2 :         // - Download L0s last, because they churn the fastest: L0s on a fast-writing tenant might
    3087            2 :         //   only exist for a few minutes before being compacted into L1s.
    3088            2 :         // - For L1 & image layers, download most recent LSNs first: the older the LSN, the sooner
    3089            2 :         //   the layer is likely to be covered by an image layer during compaction.
    3090           20 :         layers.sort_by_key(|(desc, _meta, _atime)| {
    3091           20 :             std::cmp::Reverse((
    3092           20 :                 !LayerMap::is_l0(&desc.key_range, desc.is_delta),
    3093           20 :                 desc.lsn_range.end,
    3094           20 :             ))
    3095           20 :         });
    3096            2 : 
    3097            2 :         let layers = layers
    3098            2 :             .into_iter()
    3099            8 :             .map(|(desc, meta, atime)| HeatMapLayer::new(desc.layer_name(), meta, atime))
    3100            2 :             .collect();
    3101            2 : 
    3102            2 :         Some(HeatMapTimeline::new(self.timeline_id, layers))
    3103            2 :     }
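                      :
                      :     // The download-priority key above, demonstrated on plain (is_l0, lsn_end)
                      :     // tuples: non-L0 layers sort first and, among those, higher LSN ends come
                      :     // first, with L0s at the back. `heatmap_order_example` is an illustrative
                      :     // sketch, not used by the heatmap code.
                      :     #[allow(dead_code)]
                      :     fn heatmap_order_example() {
                      :         let mut layers = vec![(true, 10u64), (false, 30), (false, 50)];
                      :         layers.sort_by_key(|&(is_l0, lsn_end)| std::cmp::Reverse((!is_l0, lsn_end)));
                      :         assert_eq!(layers, vec![(false, 50), (false, 30), (true, 10)]);
                      :     }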
    3104              : 
    3105              :     /// Returns true if the given lsn is or was an ancestor branchpoint.
    3106            0 :     pub(crate) fn is_ancestor_lsn(&self, lsn: Lsn) -> bool {
    3107            0 :         // upon timeline detach, we set the ancestor_lsn to Lsn::INVALID and store the original
    3108            0 :         // branchpoint in IndexPart::lineage
    3109            0 :         self.ancestor_lsn == lsn
    3110            0 :             || (self.ancestor_lsn == Lsn::INVALID
    3111            0 :                 && self.remote_client.is_previous_ancestor_lsn(lsn))
    3112            0 :     }
    3113              : }
    3114              : 
    3115              : impl Timeline {
    3116              :     #[allow(clippy::doc_lazy_continuation)]
    3117              :     /// Get the data needed to reconstruct all keys in the provided keyspace
    3118              :     ///
    3119              :     /// The algorithm is as follows:
    3120              :     /// 1.   While some keys are still not done and there's a timeline to visit:
    3121              :     /// 2.   Visit the timeline (see [`Timeline::get_vectored_reconstruct_data_timeline`]):
    3122              :     /// 2.1. Build the fringe for the current keyspace
    3123              :     /// 2.2. Visit the newest layer from the fringe to collect all values for the range it
    3124              :     ///      intersects
    3125              :     /// 2.3. Pop the layer from the fringe
    3126              :     /// 2.4. If the fringe is empty, go back to 1
    3127       626570 :     async fn get_vectored_reconstruct_data(
    3128       626570 :         &self,
    3129       626570 :         mut keyspace: KeySpace,
    3130       626570 :         request_lsn: Lsn,
    3131       626570 :         reconstruct_state: &mut ValuesReconstructState,
    3132       626570 :         ctx: &RequestContext,
    3133       626570 :     ) -> Result<(), GetVectoredError> {
    3134       626570 :         let mut timeline_owned: Arc<Timeline>;
    3135       626570 :         let mut timeline = self;
    3136       626570 : 
    3137       626570 :         let mut cont_lsn = Lsn(request_lsn.0 + 1);
    3138              : 
    3139       626568 :         let missing_keyspace = loop {
    3140       851424 :             if self.cancel.is_cancelled() {
    3141            0 :                 return Err(GetVectoredError::Cancelled);
    3142       851424 :             }
    3143              : 
    3144              :             let TimelineVisitOutcome {
    3145       851424 :                 completed_keyspace: completed,
    3146       851424 :                 image_covered_keyspace,
    3147       851424 :             } = Self::get_vectored_reconstruct_data_timeline(
    3148       851424 :                 timeline,
    3149       851424 :                 keyspace.clone(),
    3150       851424 :                 cont_lsn,
    3151       851424 :                 reconstruct_state,
    3152       851424 :                 &self.cancel,
    3153       851424 :                 ctx,
    3154       851424 :             )
    3155       192204 :             .await?;
    3156              : 
    3157       851424 :             keyspace.remove_overlapping_with(&completed);
    3158       851424 : 
    3159       851424 :             // Do not descend into the ancestor timeline for aux files.
    3160       851424 :             // We don't return a blanket [`GetVectoredError::MissingKey`] to avoid
    3161       851424 :             // stalling compaction.
    3162       851424 :             keyspace.remove_overlapping_with(&KeySpace {
    3163       851424 :                 ranges: vec![NON_INHERITED_RANGE, NON_INHERITED_SPARSE_RANGE],
    3164       851424 :             });
    3165       851424 : 
    3166       851424 :             // Keyspace is fully retrieved
    3167       851424 :             if keyspace.is_empty() {
    3168       626554 :                 break None;
    3169       224870 :             }
    3170              : 
    3171       224870 :             let Some(ancestor_timeline) = timeline.ancestor_timeline.as_ref() else {
    3172              :                 // Not fully retrieved but no ancestor timeline.
    3173           14 :                 break Some(keyspace);
    3174              :             };
    3175              : 
    3176              :             // Now we check whether any keys are covered by an image layer yet missing from it,
    3177              :             // which means that those keys do not exist.
    3178              : 
    3179              :             // The block below stops the vectored search if any key encountered an image layer
    3180              :             // which did not contain a snapshot for that key. Since we have already removed all completed
    3181              :             // keys from `keyspace`, we expect there to be no overlap between it and the image covered key
    3182              :             // space. If that's not the case, at least one key encountered a gap in the image layer,
    3183              :             // and we stop the search as a result.
    3184       224856 :             let removed = keyspace.remove_overlapping_with(&image_covered_keyspace);
    3185       224856 :             if !removed.is_empty() {
    3186            0 :                 break Some(removed);
    3187       224856 :             }
    3188       224856 :             // If we reached this point, `remove_overlapping_with` should not have made any change to the
    3189       224856 :             // keyspace.
    3190       224856 : 
    3191       224856 :             // Take the min to avoid reconstructing a page with data newer than request Lsn.
    3192       224856 :             cont_lsn = std::cmp::min(Lsn(request_lsn.0 + 1), Lsn(timeline.ancestor_lsn.0 + 1));
    3193       224856 :             timeline_owned = timeline
    3194       224856 :                 .get_ready_ancestor_timeline(ancestor_timeline, ctx)
    3195            2 :                 .await?;
    3196       224854 :             timeline = &*timeline_owned;
    3197              :         };
    3198              : 
    3199       626568 :         if let Some(missing_keyspace) = missing_keyspace {
    3200           14 :             return Err(GetVectoredError::MissingKey(MissingKeyError {
    3201           14 :                 key: missing_keyspace.start().unwrap(), /* better if we can store the full keyspace */
    3202           14 :                 shard: self
    3203           14 :                     .shard_identity
    3204           14 :                     .get_shard_number(&missing_keyspace.start().unwrap()),
    3205           14 :                 cont_lsn,
    3206           14 :                 request_lsn,
    3207           14 :                 ancestor_lsn: Some(timeline.ancestor_lsn),
    3208           14 :                 backtrace: None,
    3209           14 :             }));
    3210       626554 :         }
    3211       626554 : 
    3212       626554 :         Ok(())
    3213       626570 :     }
    3214              : 
    3215              :     /// Collect the reconstruct data for a keyspace from the specified timeline.
    3216              :     ///
    3217              :     /// Maintain a fringe [`LayerFringe`] which tracks all the layers that intersect
    3218              :     /// the current keyspace. The current keyspace of the search at any given timeline
    3219              :     /// is the original keyspace minus all the keys that have been completed minus
    3220              :     /// any keys for which we couldn't find an intersecting layer. It's not tracked explicitly,
    3221              :     /// but if you merge all the keyspaces in the fringe, you get the "current keyspace".
    3222              :     ///
    3223              :     /// This is basically a depth-first search visitor implementation where a vertex
    3224              :     /// is the (layer, lsn range, key space) tuple. The fringe acts as the stack.
    3225              :     ///
    3226              :     /// At each iteration pop the top of the fringe (the layer with the highest Lsn)
    3227              :     /// and get all the required reconstruct data from the layer in one go.
    3228              :     ///
    3229              :     /// Returns the completed keyspace and the keyspaces with image coverage. The caller
    3230              :     /// decides how to deal with these two keyspaces.
    3231       851424 :     async fn get_vectored_reconstruct_data_timeline(
    3232       851424 :         timeline: &Timeline,
    3233       851424 :         keyspace: KeySpace,
    3234       851424 :         mut cont_lsn: Lsn,
    3235       851424 :         reconstruct_state: &mut ValuesReconstructState,
    3236       851424 :         cancel: &CancellationToken,
    3237       851424 :         ctx: &RequestContext,
    3238       851424 :     ) -> Result<TimelineVisitOutcome, GetVectoredError> {
    3239       851424 :         let mut unmapped_keyspace = keyspace.clone();
    3240       851424 :         let mut fringe = LayerFringe::new();
    3241       851424 : 
    3242       851424 :         let mut completed_keyspace = KeySpace::default();
    3243       851424 :         let mut image_covered_keyspace = KeySpaceRandomAccum::new();
    3244              : 
    3245              :         loop {
    3246      1696854 :             if cancel.is_cancelled() {
    3247            0 :                 return Err(GetVectoredError::Cancelled);
    3248      1696854 :             }
    3249      1696854 : 
    3250      1696854 :             let (keys_done_last_step, keys_with_image_coverage) =
    3251      1696854 :                 reconstruct_state.consume_done_keys();
    3252      1696854 :             unmapped_keyspace.remove_overlapping_with(&keys_done_last_step);
    3253      1696854 :             completed_keyspace.merge(&keys_done_last_step);
    3254      1696854 :             if let Some(keys_with_image_coverage) = keys_with_image_coverage {
    3255        21808 :                 unmapped_keyspace
    3256        21808 :                     .remove_overlapping_with(&KeySpace::single(keys_with_image_coverage.clone()));
    3257        21808 :                 image_covered_keyspace.add_range(keys_with_image_coverage);
    3258      1675046 :             }
    3259              : 
    3260              :             // Do not descend any further if the last layer we visited
    3261              :             // completed all keys in the keyspace it inspected. This is not
    3262              :             // required for correctness, but avoids visiting extra layers
    3263              :             // which turns out to be a perf bottleneck in some cases.
    3264      1696854 :             if !unmapped_keyspace.is_empty() {
    3265      1072544 :                 let guard = timeline.layers.read().await;
    3266      1072544 :                 let layers = guard.layer_map()?;
    3267              : 
    3268      1072544 :                 let in_memory_layer = layers.find_in_memory_layer(|l| {
    3269       912921 :                     let start_lsn = l.get_lsn_range().start;
    3270       912921 :                     cont_lsn > start_lsn
    3271      1072544 :                 });
    3272      1072544 : 
    3273      1072544 :                 match in_memory_layer {
    3274       606316 :                     Some(l) => {
    3275       606316 :                         let lsn_range = l.get_lsn_range().start..cont_lsn;
    3276       606316 :                         fringe.update(
    3277       606316 :                             ReadableLayer::InMemoryLayer(l),
    3278       606316 :                             unmapped_keyspace.clone(),
    3279       606316 :                             lsn_range,
    3280       606316 :                         );
    3281       606316 :                     }
    3282              :                     None => {
    3283       466250 :                         for range in unmapped_keyspace.ranges.iter() {
    3284       466250 :                             let results = layers.range_search(range.clone(), cont_lsn);
    3285       466250 : 
    3286       466250 :                             results
    3287       466250 :                                 .found
    3288       466250 :                                 .into_iter()
    3289       466250 :                                 .map(|(SearchResult { layer, lsn_floor }, keyspace_accum)| {
    3290       239128 :                                     (
    3291       239128 :                                         ReadableLayer::PersistentLayer(guard.get_from_desc(&layer)),
    3292       239128 :                                         keyspace_accum.to_keyspace(),
    3293       239128 :                                         lsn_floor..cont_lsn,
    3294       239128 :                                     )
    3295       466250 :                                 })
    3296       466250 :                                 .for_each(|(layer, keyspace, lsn_range)| {
    3297       239128 :                                     fringe.update(layer, keyspace, lsn_range)
    3298       466250 :                                 });
    3299       466250 :                         }
    3300              :                     }
    3301              :                 }
    3302              : 
    3303              :                 // It's safe to drop the layer map lock after planning the next round of reads.
    3304              :                 // The fringe keeps readable handles for the layers which are safe to read even
    3305              :                 // if layers were compacted or flushed.
    3306              :                 //
    3307              :                 // The more interesting consideration is: "Why is the read algorithm still correct
    3308              :                 // if the layer map changes while it is operating?". Doing a vectored read on a
    3309              :                 // timeline boils down to pushing an imaginary lsn boundary downwards for each range
    3310              :                 // covered by the read. The layer map tells us how to move the lsn downwards for a
    3311              :                 // range at *a particular point in time*. It is fine for the answer to be different
    3312              :                 // at two different time points.
    3313      1072544 :                 drop(guard);
    3314       624310 :             }
    3315              : 
    3316      1696854 :             if let Some((layer_to_read, keyspace_to_read, lsn_range)) = fringe.next_layer() {
    3317       845430 :                 let next_cont_lsn = lsn_range.start;
    3318       845430 :                 layer_to_read
    3319       845430 :                     .get_values_reconstruct_data(
    3320       845430 :                         keyspace_to_read.clone(),
    3321       845430 :                         lsn_range,
    3322       845430 :                         reconstruct_state,
    3323       845430 :                         ctx,
    3324       845430 :                     )
    3325       183072 :                     .await?;
    3326              : 
    3327       845430 :                 unmapped_keyspace = keyspace_to_read;
    3328       845430 :                 cont_lsn = next_cont_lsn;
    3329       845430 : 
    3330       845430 :                 reconstruct_state.on_layer_visited(&layer_to_read);
    3331              :             } else {
    3332       851424 :                 break;
    3333       851424 :             }
    3334       851424 :         }
    3335       851424 : 
    3336       851424 :         Ok(TimelineVisitOutcome {
    3337       851424 :             completed_keyspace,
    3338       851424 :             image_covered_keyspace: image_covered_keyspace.consume_keyspace(),
    3339       851424 :         })
    3340       851424 :     }
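                      :
                      :     // The fringe's visit order in miniature: the next layer to read is always
                      :     // the one with the highest LSN, which is what a max-heap keyed on the end
                      :     // of the lsn range yields. A sketch only; the real [`LayerFringe`] also
                      :     // tracks the keyspace still unmapped for each layer.
                      :     #[allow(dead_code)]
                      :     fn fringe_order_example() {
                      :         let mut fringe = std::collections::BinaryHeap::new();
                      :         fringe.push((Lsn(0x40), "delta"));
                      :         fringe.push((Lsn(0x80), "inmem"));
                      :         fringe.push((Lsn(0x20), "image"));
                      :         assert_eq!(fringe.pop(), Some((Lsn(0x80), "inmem")));
                      :     }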
    3341              : 
    3342       224856 :     async fn get_ready_ancestor_timeline(
    3343       224856 :         &self,
    3344       224856 :         ancestor: &Arc<Timeline>,
    3345       224856 :         ctx: &RequestContext,
    3346       224856 :     ) -> Result<Arc<Timeline>, GetReadyAncestorError> {
    3347       224856 :         // It's possible that the ancestor timeline isn't active yet, or
    3348       224856 :         // is active but hasn't yet caught up to the branch point. Wait
    3349       224856 :         // for it.
    3350       224856 :         //
    3351       224856 :         // This cannot happen while the pageserver is running normally,
    3352       224856 :         // because you cannot create a branch from a point that isn't
    3353       224856 :         // present in the pageserver yet. However, we don't wait for the
    3354       224856 :         // branch point to be uploaded to cloud storage before creating
    3355       224856 :         // a branch. I.e., the branch LSN need not be remote consistent
    3356       224856 :         // for the branching operation to succeed.
    3357       224856 :         //
    3358       224856 :         // Hence, if we try to load a tenant in such a state where
    3359       224856 :         // 1. the existence of the branch was persisted (in IndexPart and/or locally)
    3360       224856 :         // 2. but the ancestor state is behind branch_lsn because it was not yet persisted
    3361       224856 :         // then we will need to wait for the ancestor timeline to
    3362       224856 :         // re-stream WAL up to branch_lsn before we access it.
    3363       224856 :         //
    3364       224856 :         // How can a tenant get in such a state?
    3365       224856 :         // - ungraceful pageserver process exit
    3366       224856 :         // - detach+attach => this is a bug, https://github.com/neondatabase/neon/issues/4219
    3367       224856 :         //
    3368       224856 :         // NB: this could be avoided by requiring
    3369       224856 :         //   branch_lsn >= remote_consistent_lsn
    3370       224856 :         // during branch creation.
    3371       224856 :         match ancestor.wait_to_become_active(ctx).await {
    3372       224854 :             Ok(()) => {}
    3373              :             Err(TimelineState::Stopping) => {
    3374              :                 // If an ancestor is stopping, it means the tenant is stopping: handle this the same as if this timeline was stopping.
    3375            0 :                 return Err(GetReadyAncestorError::Cancelled);
    3376              :             }
    3377            2 :             Err(state) => {
    3378            2 :                 return Err(GetReadyAncestorError::BadState {
    3379            2 :                     timeline_id: ancestor.timeline_id,
    3380            2 :                     state,
    3381            2 :                 });
    3382              :             }
    3383              :         }
    3384       224854 :         ancestor
    3385       224854 :             .wait_lsn(self.ancestor_lsn, WaitLsnWaiter::Timeline(self), ctx)
    3386            0 :             .await
    3387       224854 :             .map_err(|e| match e {
    3388            0 :                 e @ WaitLsnError::Timeout(_) => GetReadyAncestorError::AncestorLsnTimeout(e),
    3389            0 :                 WaitLsnError::Shutdown => GetReadyAncestorError::Cancelled,
    3390            0 :                 WaitLsnError::BadState(state) => GetReadyAncestorError::BadState {
    3391            0 :                     timeline_id: ancestor.timeline_id,
    3392            0 :                     state,
    3393            0 :                 },
    3394       224854 :             })?;
    3395              : 
    3396       224854 :         Ok(ancestor.clone())
    3397       224856 :     }
    3398              : 
    3399       151304 :     pub(crate) fn get_shard_identity(&self) -> &ShardIdentity {
    3400       151304 :         &self.shard_identity
    3401       151304 :     }
    3402              : 
    3403              :     #[inline(always)]
    3404            0 :     pub(crate) fn shard_timeline_id(&self) -> ShardTimelineId {
    3405            0 :         ShardTimelineId {
    3406            0 :             shard_index: ShardIndex {
    3407            0 :                 shard_number: self.shard_identity.number,
    3408            0 :                 shard_count: self.shard_identity.count,
    3409            0 :             },
    3410            0 :             timeline_id: self.timeline_id,
    3411            0 :         }
    3412            0 :     }
    3413              : 
    3414              :     /// Returns a non-frozen open in-memory layer for ingestion.
    3415              :     ///
    3416              :     /// Takes a witness of timeline writer state lock being held, because it makes no sense to call
    3417              :     /// this function without holding the mutex.
    3418         1268 :     async fn get_layer_for_write(
    3419         1268 :         &self,
    3420         1268 :         lsn: Lsn,
    3421         1268 :         _guard: &tokio::sync::MutexGuard<'_, Option<TimelineWriterState>>,
    3422         1268 :         ctx: &RequestContext,
    3423         1268 :     ) -> anyhow::Result<Arc<InMemoryLayer>> {
    3424         1268 :         let mut guard = self.layers.write().await;
    3425         1268 :         let gate_guard = self.gate.enter().context("enter gate for inmem layer")?;
    3426              : 
    3427         1268 :         let last_record_lsn = self.get_last_record_lsn();
    3428         1268 :         ensure!(
    3429         1268 :             lsn > last_record_lsn,
    3430            0 :             "cannot modify relation after advancing last_record_lsn (incoming_lsn={}, last_record_lsn={})",
    3431              :             lsn,
    3432              :             last_record_lsn,
    3433              :         );
    3434              : 
    3435         1268 :         let layer = guard
    3436         1268 :             .open_mut()?
    3437         1268 :             .get_layer_for_write(
    3438         1268 :                 lsn,
    3439         1268 :                 self.conf,
    3440         1268 :                 self.timeline_id,
    3441         1268 :                 self.tenant_shard_id,
    3442         1268 :                 gate_guard,
    3443         1268 :                 ctx,
    3444         1268 :             )
    3445          719 :             .await?;
    3446         1268 :         Ok(layer)
    3447         1268 :     }
    3448              : 
    3449      5279064 :     pub(crate) fn finish_write(&self, new_lsn: Lsn) {
    3450      5279064 :         assert!(new_lsn.is_aligned());
    3451              : 
    3452      5279064 :         self.metrics.last_record_gauge.set(new_lsn.0 as i64);
    3453      5279064 :         self.last_record_lsn.advance(new_lsn);
    3454      5279064 :     }
    3455              : 
    3456              :     /// Freeze any existing open in-memory layer and unconditionally notify the flush loop.
    3457              :     ///
    3458              :     /// The flush loop is notified unconditionally because in sharded cases we will want to
    3459              :     /// leave an Lsn gap. Unsharded tenants do not have Lsn gaps.
    3460         1172 :     async fn freeze_inmem_layer_at(
    3461         1172 :         &self,
    3462         1172 :         at: Lsn,
    3463         1172 :         write_lock: &mut tokio::sync::MutexGuard<'_, Option<TimelineWriterState>>,
    3464         1172 :     ) -> Result<u64, FlushLayerError> {
    3465         1172 :         let frozen = {
    3466         1172 :             let mut guard = self.layers.write().await;
    3467         1172 :             guard
    3468         1172 :                 .open_mut()?
    3469         1172 :                 .try_freeze_in_memory_layer(at, &self.last_freeze_at, write_lock)
    3470            1 :                 .await
    3471              :         };
    3472              : 
    3473         1172 :         if frozen {
    3474         1144 :             let now = Instant::now();
    3475         1144 :             *(self.last_freeze_ts.write().unwrap()) = now;
    3476         1144 :         }
    3477              : 
    3478              :         // Increment the flush cycle counter and wake up the flush task.
    3479              :         // Remember the new value, so that when we listen for the flush
    3480              :         // to finish, we know when the flush that we initiated has
    3481              :         // finished, instead of some other flush that was started earlier.
    3482         1172 :         let mut my_flush_request = 0;
    3483         1172 : 
    3484         1172 :         let flush_loop_state = { *self.flush_loop_state.lock().unwrap() };
    3485         1172 :         if !matches!(flush_loop_state, FlushLoopState::Running { .. }) {
    3486            0 :             return Err(FlushLayerError::NotRunning(flush_loop_state));
    3487         1172 :         }
    3488         1172 : 
    3489         1172 :         self.layer_flush_start_tx.send_modify(|(counter, lsn)| {
    3490         1172 :             my_flush_request = *counter + 1;
    3491         1172 :             *counter = my_flush_request;
    3492         1172 :             *lsn = std::cmp::max(at, *lsn);
    3493         1172 :         });
    3494         1172 : 
    3495         1172 :         assert_ne!(my_flush_request, 0);
    3496              : 
    3497         1172 :         Ok(my_flush_request)
    3498         1172 :     }
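                      :
                      :     // The flush-request hand-off above in miniature: `send_modify` bumps the
                      :     // shared counter under the watch channel's lock and returns the value this
                      :     // caller should later wait for, so each waiter can tell its own flush apart
                      :     // from earlier ones. `bump_flush_counter` is a hypothetical stand-alone
                      :     // version of the code above.
                      :     #[allow(dead_code)]
                      :     fn bump_flush_counter(tx: &watch::Sender<(u64, Lsn)>, at: Lsn) -> u64 {
                      :         let mut my_request = 0;
                      :         tx.send_modify(|(counter, lsn)| {
                      :             my_request = *counter + 1;
                      :             *counter = my_request;
                      :             *lsn = std::cmp::max(at, *lsn);
                      :         });
                      :         my_request
                      :     }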
    3499              : 
    3500              :     /// Layer flusher task's main loop.
    3501          412 :     async fn flush_loop(
    3502          412 :         self: &Arc<Self>,
    3503          412 :         mut layer_flush_start_rx: tokio::sync::watch::Receiver<(u64, Lsn)>,
    3504          412 :         ctx: &RequestContext,
    3505          412 :     ) {
    3506          412 :         info!("started flush loop");
    3507              :         loop {
    3508         1546 :             tokio::select! {
    3509         1546 :                 _ = self.cancel.cancelled() => {
    3510           10 :                     info!("shutting down layer flush task due to Timeline::cancel");
    3511           10 :                     break;
    3512              :                 },
    3513         1546 :                 _ = layer_flush_start_rx.changed() => {}
    3514         1134 :             }
    3515         1134 :             trace!("waking up");
    3516         1134 :             let (flush_counter, frozen_to_lsn) = *layer_flush_start_rx.borrow();
    3517         1134 : 
    3518         1134 :             // The highest LSN to which we flushed in the loop over frozen layers
    3519         1134 :             let mut flushed_to_lsn = Lsn(0);
    3520              : 
    3521         1134 :             let result = loop {
    3522         2278 :                 if self.cancel.is_cancelled() {
    3523            0 :                     info!("dropping out of flush loop for timeline shutdown");
    3524              :                     // Note: we do not bother transmitting into [`layer_flush_done_tx`], because
    3525              :                     // anyone waiting on that will respect self.cancel as well: they will stop
    3526              :                     // waiting at the same time as we drop out of this loop.
    3527            0 :                     return;
    3528         2278 :                 }
    3529         2278 : 
    3530         2278 :                 let timer = self.metrics.flush_time_histo.start_timer();
    3531              : 
    3532              :                 let num_frozen_layers;
    3533              :                 let frozen_layer_total_size;
    3534         2278 :                 let layer_to_flush = {
    3535         2278 :                     let guard = self.layers.read().await;
    3536         2278 :                     let Ok(lm) = guard.layer_map() else {
    3537            0 :                         info!("dropping out of flush loop for timeline shutdown");
    3538            0 :                         return;
    3539              :                     };
    3540         2278 :                     num_frozen_layers = lm.frozen_layers.len();
    3541         2278 :                     frozen_layer_total_size = lm
    3542         2278 :                         .frozen_layers
    3543         2278 :                         .iter()
    3544         2278 :                         .map(|l| l.estimated_in_mem_size())
    3545         2278 :                         .sum::<u64>();
    3546         2278 :                     lm.frozen_layers.front().cloned()
    3547              :                     // drop 'layers' lock to allow concurrent reads and writes
    3548              :                 };
    3549         2278 :                 let Some(layer_to_flush) = layer_to_flush else {
    3550         1134 :                     break Ok(());
    3551              :                 };
    3552         1144 :                 if num_frozen_layers
    3553         1144 :                     > std::cmp::max(
    3554         1144 :                         self.get_compaction_threshold(),
    3555         1144 :                         DEFAULT_COMPACTION_THRESHOLD,
    3556         1144 :                     )
    3557            0 :                     && frozen_layer_total_size >= /* 128 MB */ 128000000
    3558              :                 {
    3559            0 :                     tracing::warn!(
    3560            0 :                         "too many frozen layers: {num_frozen_layers} layers with estimated in-mem size of {frozen_layer_total_size} bytes",
    3561              :                     );
    3562         1144 :                 }
    3563        17084 :                 match self.flush_frozen_layer(layer_to_flush, ctx).await {
    3564         1144 :                     Ok(this_layer_to_lsn) => {
    3565         1144 :                         flushed_to_lsn = std::cmp::max(flushed_to_lsn, this_layer_to_lsn);
    3566         1144 :                     }
    3567              :                     Err(FlushLayerError::Cancelled) => {
    3568            0 :                         info!("dropping out of flush loop for timeline shutdown");
    3569            0 :                         return;
    3570              :                     }
    3571            0 :                     err @ Err(
    3572            0 :                         FlushLayerError::NotRunning(_)
    3573            0 :                         | FlushLayerError::Other(_)
    3574            0 :                         | FlushLayerError::CreateImageLayersError(_),
    3575            0 :                     ) => {
    3576            0 :                         error!("could not flush frozen layer: {err:?}");
    3577            0 :                         break err.map(|_| ());
    3578              :                     }
    3579              :                 }
    3580         1144 :                 timer.stop_and_record();
    3581              :             };
    3582              : 
    3583              :             // Unsharded tenants should never advance their LSN beyond the end of the
    3584              :             // highest layer they write: such gaps between layer data and the frozen LSN
    3585              :             // are only legal on sharded tenants.
    3586         1134 :             debug_assert!(
    3587         1134 :                 self.shard_identity.count.count() > 1
    3588         1134 :                     || flushed_to_lsn >= frozen_to_lsn
    3589           68 :                     || !flushed_to_lsn.is_valid()
    3590              :             );
    3591              : 
    3592         1134 :             if flushed_to_lsn < frozen_to_lsn && self.shard_identity.count.count() > 1 {
    3593              :                 // If our layer flushes didn't carry disk_consistent_lsn up to the `to_lsn` advertised
    3594              :                 // to us via layer_flush_start_rx, then advance it here.
    3595              :                 //
    3596              :                 // This path is only taken for tenants with multiple shards: single sharded tenants should
    3597              :                 // never encounter a gap in the WAL.
    3598            0 :                 let old_disk_consistent_lsn = self.disk_consistent_lsn.load();
    3599            0 :                 tracing::debug!("Advancing disk_consistent_lsn across layer gap {old_disk_consistent_lsn}->{frozen_to_lsn}");
    3600            0 :                 if self.set_disk_consistent_lsn(frozen_to_lsn) {
    3601            0 :                     if let Err(e) = self.schedule_uploads(frozen_to_lsn, vec![]) {
    3602            0 :                         tracing::warn!("Failed to schedule metadata upload after updating disk_consistent_lsn: {e}");
    3603            0 :                     }
    3604            0 :                 }
    3605         1134 :             }
    3606              : 
    3607              :             // Notify any listeners that we're done
    3608         1134 :             let _ = self
    3609         1134 :                 .layer_flush_done_tx
    3610         1134 :                 .send_replace((flush_counter, result));
    3611              :         }
    3612           10 :     }
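                      :
                      :     // Editor's note: a minimal, self-contained sketch (hypothetical names, not
                      :     // part of the pageserver API) of the wake-up pattern `flush_loop` uses above.
                      :     // `watch::Receiver::changed()` coalesces bursts of flush requests into a
                      :     // single wake-up, and `borrow()` always observes the latest request, so the
                      :     // worker never acts on stale state.
                      :     async fn example_worker_loop(
                      :         mut start_rx: tokio::sync::watch::Receiver<(u64, u64)>,
                      :         cancel: tokio_util::sync::CancellationToken,
                      :     ) {
                      :         loop {
                      :             tokio::select! {
                      :                 // Exit promptly on shutdown, mirroring Timeline::cancel above.
                      :                 _ = cancel.cancelled() => break,
                      :                 res = start_rx.changed() => {
                      :                     // `changed()` errors only once every sender is dropped;
                      :                     // treat that the same as cancellation.
                      :                     if res.is_err() { break; }
                      :                 }
                      :             }
                      :             // Snapshot the most recent request; superseded requests are skipped.
                      :             let (_counter, _target_lsn) = *start_rx.borrow();
                      :             // ... perform one round of work here, then publish the result on a
                      :             // second watch channel, as flush_loop does with layer_flush_done_tx ...
                      :         }
                      :     }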
    3613              : 
    3614              :     /// Waits for any flush request created by [`Self::freeze_inmem_layer_at`] to complete.
    3615         1092 :     async fn wait_flush_completion(&self, request: u64) -> Result<(), FlushLayerError> {
    3616         1092 :         let mut rx = self.layer_flush_done_tx.subscribe();
    3617              :         loop {
    3618              :             {
    3619         2177 :                 let (last_result_counter, last_result) = &*rx.borrow();
    3620         2177 :                 if *last_result_counter >= request {
    3621         1092 :                     if let Err(err) = last_result {
    3622              :                         // We already logged the original error in
    3623              :                         // flush_loop. We cannot propagate it to the caller
    3624              :                     // here, because it might not implement Clone
    3625            0 :                         return Err(err.clone());
    3626              :                     } else {
    3627         1092 :                         return Ok(());
    3628              :                     }
    3629         1085 :                 }
    3630         1085 :             }
    3631         1085 :             trace!("waiting for flush to complete");
    3632         1085 :             tokio::select! {
    3633         1085 :                 rx_e = rx.changed() => {
    3634         1085 :                     rx_e.map_err(|_| FlushLayerError::NotRunning(*self.flush_loop_state.lock().unwrap()))?;
    3635              :                 },
    3636              :                 // Cancellation safety: we are not leaving an I/O in-flight for the flush, we're just ignoring
    3637              :                 // the notification from [`flush_loop`] that it completed.
    3638         1085 :                 _ = self.cancel.cancelled() => {
    3639            0 :                     tracing::info!("Cancelled layer flush due to timeline shutdown");
    3640            0 :                     return Ok(())
    3641              :                 }
    3642              :             };
    3643         1085 :             trace!("done")
    3644              :         }
    3645         1092 :     }
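                      :
                      :     // Editor's note: the waiter side of the request/ack handshake above, as a
                      :     // minimal hedged sketch (hypothetical names). A watch channel carries
                      :     // `(last_completed_counter, result)`; a waiter for request number `request`
                      :     // loops until the counter catches up. Because `borrow()` snapshots the
                      :     // latest value, missed intermediate notifications are harmless.
                      :     async fn example_wait_for(
                      :         mut done_rx: tokio::sync::watch::Receiver<(u64, Result<(), String>)>,
                      :         request: u64,
                      :     ) -> Result<(), String> {
                      :         loop {
                      :             {
                      :                 let (last_counter, last_result) = &*done_rx.borrow();
                      :                 if *last_counter >= request {
                      :                     return last_result.clone();
                      :                 }
                      :             } // drop the borrow: holding it across an await could block the sender
                      :             if done_rx.changed().await.is_err() {
                      :                 return Err("worker exited".to_string());
                      :             }
                      :         }
                      :     }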
    3646              : 
    3647              :     /// Flush one frozen in-memory layer to disk, as a new delta layer.
    3648              :     ///
    3649              :     /// Return value is the last LSN (inclusive) of the layer that was frozen.
    3650         1144 :     #[instrument(skip_all, fields(layer=%frozen_layer))]
    3651              :     async fn flush_frozen_layer(
    3652              :         self: &Arc<Self>,
    3653              :         frozen_layer: Arc<InMemoryLayer>,
    3654              :         ctx: &RequestContext,
    3655              :     ) -> Result<Lsn, FlushLayerError> {
    3656              :         debug_assert_current_span_has_tenant_and_timeline_id();
    3657              : 
    3658              :         // As a special case, when we have just imported an image into the repository,
    3659              :         // instead of writing out an L0 delta layer, we directly write out image layer
    3660              :         // files. This is possible as long as *all* the data imported into the
    3661              :         // repository have the same LSN.
    3662              :         let lsn_range = frozen_layer.get_lsn_range();
    3663              : 
    3664              :         // Whether to directly create image layers for this flush, or flush them as delta layers
    3665              :         let create_image_layer =
    3666              :             lsn_range.start == self.initdb_lsn && lsn_range.end == Lsn(self.initdb_lsn.0 + 1);
    3667              : 
    3668              :         #[cfg(test)]
    3669              :         {
    3670              :             match &mut *self.flush_loop_state.lock().unwrap() {
    3671              :                 FlushLoopState::NotStarted | FlushLoopState::Exited => {
    3672              :                     panic!("flush loop not running")
    3673              :                 }
    3674              :                 FlushLoopState::Running {
    3675              :                     expect_initdb_optimization,
    3676              :                     initdb_optimization_count,
    3677              :                     ..
    3678              :                 } => {
    3679              :                     if create_image_layer {
    3680              :                         *initdb_optimization_count += 1;
    3681              :                     } else {
    3682              :                         assert!(!*expect_initdb_optimization, "expected initdb optimization");
    3683              :                     }
    3684              :                 }
    3685              :             }
    3686              :         }
    3687              : 
    3688              :         let (layers_to_upload, delta_layer_to_add) = if create_image_layer {
    3689              :             // Note: The 'ctx' in use here has DownloadBehavior::Error. We should not
    3690              :             // require downloading anything during initial import.
    3691              :             let ((rel_partition, metadata_partition), _lsn) = self
    3692              :                 .repartition(
    3693              :                     self.initdb_lsn,
    3694              :                     self.get_compaction_target_size(),
    3695              :                     EnumSet::empty(),
    3696              :                     ctx,
    3697              :                 )
    3698              :                 .await
    3699            0 :                 .map_err(|e| FlushLayerError::from_anyhow(self, e.into()))?;
    3700              : 
    3701              :             if self.cancel.is_cancelled() {
    3702              :                 return Err(FlushLayerError::Cancelled);
    3703              :             }
    3704              : 
    3705              :             let mut layers_to_upload = Vec::new();
    3706              :             layers_to_upload.extend(
    3707              :                 self.create_image_layers(
    3708              :                     &rel_partition,
    3709              :                     self.initdb_lsn,
    3710              :                     ImageLayerCreationMode::Initial,
    3711              :                     ctx,
    3712              :                 )
    3713              :                 .await?,
    3714              :             );
    3715              :             if !metadata_partition.parts.is_empty() {
    3716              :                 assert_eq!(
    3717              :                     metadata_partition.parts.len(),
    3718              :                     1,
    3719              :                     "currently sparse keyspace should only contain a single metadata keyspace"
    3720              :                 );
    3721              :                 layers_to_upload.extend(
    3722              :                     self.create_image_layers(
    3723              :                         // Safety: create_image_layers treats sparse keyspaces differently, in that it does not scan
    3724              :                         // every single key within the keyspace; therefore, it is safe to force-convert it
    3725              :                         // into a dense keyspace before calling this function.
    3726              :                         &metadata_partition.into_dense(),
    3727              :                         self.initdb_lsn,
    3728              :                         ImageLayerCreationMode::Initial,
    3729              :                         ctx,
    3730              :                     )
    3731              :                     .await?,
    3732              :                 );
    3733              :             }
    3734              : 
    3735              :             (layers_to_upload, None)
    3736              :         } else {
    3737              :             // Normal case, write out a L0 delta layer file.
    3738              :             // `create_delta_layer` will not modify the layer map.
    3739              :             // We will remove frozen layer and add delta layer in one atomic operation later.
    3740              :             let Some(layer) = self
    3741              :                 .create_delta_layer(&frozen_layer, None, ctx)
    3742              :                 .await
    3743            0 :                 .map_err(|e| FlushLayerError::from_anyhow(self, e))?
    3744              :             else {
    3745              :                 panic!("delta layer cannot be empty if no filter is applied");
    3746              :             };
    3747              :             (
    3748              :                 // FIXME: even though we have a single image and single delta layer assumption
    3749              :                 // we push them to vec
    3750              :                 vec![layer.clone()],
    3751              :                 Some(layer),
    3752              :             )
    3753              :         };
    3754              : 
    3755              :         pausable_failpoint!("flush-layer-cancel-after-writing-layer-out-pausable");
    3756              : 
    3757              :         if self.cancel.is_cancelled() {
    3758              :             return Err(FlushLayerError::Cancelled);
    3759              :         }
    3760              : 
    3761              :         let disk_consistent_lsn = Lsn(lsn_range.end.0 - 1);
    3762              : 
    3763              :         // The new on-disk layers are now in the layer map. We can remove the
    3764              :         // in-memory layer from the map now. The flushed layer is stored in
    3765              :         // the mapping in `create_delta_layer`.
    3766              :         {
    3767              :             let mut guard = self.layers.write().await;
    3768              : 
    3769              :             guard.open_mut()?.finish_flush_l0_layer(
    3770              :                 delta_layer_to_add.as_ref(),
    3771              :                 &frozen_layer,
    3772              :                 &self.metrics,
    3773              :             );
    3774              : 
    3775              :             if self.set_disk_consistent_lsn(disk_consistent_lsn) {
    3776              :                 // Schedule remote uploads that will reflect our new disk_consistent_lsn
    3777              :                 self.schedule_uploads(disk_consistent_lsn, layers_to_upload)
    3778            0 :                     .map_err(|e| FlushLayerError::from_anyhow(self, e))?;
    3779              :             }
    3780              :             // release lock on 'layers'
    3781              :         };
    3782              : 
    3783              :         // Backpressure mechanism: wait with continuation of the flush loop until we have uploaded all layer files.
    3784              :         // This makes us refuse ingest until the new layers have been persisted to the remote.
    3785              :         self.remote_client
    3786              :             .wait_completion()
    3787              :             .await
    3788            0 :             .map_err(|e| match e {
    3789              :                 WaitCompletionError::UploadQueueShutDownOrStopped
    3790              :                 | WaitCompletionError::NotInitialized(
    3791              :                     NotInitialized::ShuttingDown | NotInitialized::Stopped,
    3792            0 :                 ) => FlushLayerError::Cancelled,
    3793              :                 WaitCompletionError::NotInitialized(NotInitialized::Uninitialized) => {
    3794            0 :                     FlushLayerError::Other(anyhow!(e).into())
    3795              :                 }
    3796            0 :             })?;
    3797              : 
    3798              :         // FIXME: between create_delta_layer and the scheduling of the upload in `update_metadata_file`,
    3799              :         // a compaction can delete the file and then it won't be available for uploads any more.
    3800              :         // We still schedule the upload, resulting in an error, but ideally we'd somehow avoid this
    3801              :         // race situation.
    3802              :         // See https://github.com/neondatabase/neon/issues/4526
    3803              :         pausable_failpoint!("flush-frozen-pausable");
    3804              : 
    3805              :         // This failpoint is used by another test case `test_pageserver_recovery`.
    3806              :         fail_point!("flush-frozen-exit");
    3807              : 
    3808              :         Ok(Lsn(lsn_range.end.0 - 1))
    3809              :     }
    3810              : 
    3811              :     /// Return true if the value changed
    3812              :     ///
    3813              :     /// This function must only be used from the layer flush task.
    3814         1144 :     fn set_disk_consistent_lsn(&self, new_value: Lsn) -> bool {
    3815         1144 :         let old_value = self.disk_consistent_lsn.fetch_max(new_value);
    3816         1144 :         assert!(new_value >= old_value, "disk_consistent_lsn must be growing monotonously at runtime; current {old_value}, offered {new_value}");
    3817         1144 :         new_value != old_value
    3818         1144 :     }
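                      :
                      :     // Editor's note: the lock-free monotonic-update idiom behind
                      :     // `set_disk_consistent_lsn`, sketched with a plain `AtomicU64` (hypothetical
                      :     // helper, not pageserver API). `fetch_max` guarantees the stored value never
                      :     // decreases even if callers race, and the returned previous value tells us
                      :     // whether this call actually advanced it.
                      :     fn example_advance_monotonic(cell: &AtomicU64, new_value: u64) -> bool {
                      :         // The cell now holds max(old, new); `old_value` is the prior contents.
                      :         let old_value = cell.fetch_max(new_value, AtomicOrdering::AcqRel);
                      :         new_value > old_value
                      :     }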
    3819              : 
    3820              :     /// Update metadata file
    3821         1146 :     fn schedule_uploads(
    3822         1146 :         &self,
    3823         1146 :         disk_consistent_lsn: Lsn,
    3824         1146 :         layers_to_upload: impl IntoIterator<Item = ResidentLayer>,
    3825         1146 :     ) -> anyhow::Result<()> {
    3826         1146 :         // We can only save a valid 'prev_record_lsn' value on disk if we
    3827         1146 :         // flushed *all* in-memory changes to disk. We only track
    3828         1146 :         // 'prev_record_lsn' in memory for the latest processed record, so we
    3829         1146 :         // don't remember the correct value that corresponds to some older
    3830         1146 :         // LSN. But if we flush everything, then the value corresponding to the
    3831         1146 :         // current 'last_record_lsn' is correct and we can store it on disk.
    3832         1146 :         let RecordLsn {
    3833         1146 :             last: last_record_lsn,
    3834         1146 :             prev: prev_record_lsn,
    3835         1146 :         } = self.last_record_lsn.load();
    3836         1146 :         let ondisk_prev_record_lsn = if disk_consistent_lsn == last_record_lsn {
    3837         1066 :             Some(prev_record_lsn)
    3838              :         } else {
    3839           80 :             None
    3840              :         };
    3841              : 
    3842         1146 :         let update = crate::tenant::metadata::MetadataUpdate::new(
    3843         1146 :             disk_consistent_lsn,
    3844         1146 :             ondisk_prev_record_lsn,
    3845         1146 :             *self.latest_gc_cutoff_lsn.read(),
    3846         1146 :         );
    3847         1146 : 
    3848         1146 :         fail_point!("checkpoint-before-saving-metadata", |x| bail!(
    3849            0 :             "{}",
    3850            0 :             x.unwrap()
    3851         1146 :         ));
    3852              : 
    3853         2302 :         for layer in layers_to_upload {
    3854         1156 :             self.remote_client.schedule_layer_file_upload(layer)?;
    3855              :         }
    3856         1146 :         self.remote_client
    3857         1146 :             .schedule_index_upload_for_metadata_update(&update)?;
    3858              : 
    3859         1146 :         Ok(())
    3860         1146 :     }
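                      :
                      :     // Editor's note: a hedged sketch of the `fail_point!` idiom used above
                      :     // (hypothetical failpoint name). With the `fail` crate, a test can arm the
                      :     // named failpoint (e.g. via the FAILPOINTS environment variable or
                      :     // `fail::cfg`) so this function bails at exactly this step; when the
                      :     // failpoint is not armed, the macro is effectively a no-op.
                      :     fn example_failpoint_step() -> anyhow::Result<()> {
                      :         fail_point!("example-before-save", |payload| bail!(
                      :             "failpoint triggered: {:?}",
                      :             payload
                      :         ));
                      :         Ok(())
                      :     }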
    3861              : 
    3862            0 :     pub(crate) async fn preserve_initdb_archive(&self) -> anyhow::Result<()> {
    3863            0 :         self.remote_client
    3864            0 :             .preserve_initdb_archive(
    3865            0 :                 &self.tenant_shard_id.tenant_id,
    3866            0 :                 &self.timeline_id,
    3867            0 :                 &self.cancel,
    3868            0 :             )
    3869            0 :             .await
    3870            0 :     }
    3871              : 
    3872              :     // Write out the given frozen in-memory layer as a new L0 delta file. This L0 file will not be tracked
    3873              :     // in the layer map immediately. The caller is responsible for putting it into the layer map.
    3874          968 :     async fn create_delta_layer(
    3875          968 :         self: &Arc<Self>,
    3876          968 :         frozen_layer: &Arc<InMemoryLayer>,
    3877          968 :         key_range: Option<Range<Key>>,
    3878          968 :         ctx: &RequestContext,
    3879          968 :     ) -> anyhow::Result<Option<ResidentLayer>> {
    3880          968 :         let self_clone = Arc::clone(self);
    3881          968 :         let frozen_layer = Arc::clone(frozen_layer);
    3882          968 :         let ctx = ctx.attached_child();
    3883          968 :         let work = async move {
    3884          968 :             let Some((desc, path)) = frozen_layer
    3885          968 :                 .write_to_disk(&ctx, key_range, self_clone.l0_flush_global_state.inner())
    3886        10250 :                 .await?
    3887              :             else {
    3888            0 :                 return Ok(None);
    3889              :             };
    3890          968 :             let new_delta = Layer::finish_creating(self_clone.conf, &self_clone, desc, &path)?;
    3891              : 
    3892              :             // The write_to_disk() above calls writer.finish() which already did the fsync of the inodes.
    3893              :             // We just need to fsync the directory in which these inodes are linked,
    3894              :             // which we know to be the timeline directory.
    3895              :             //
    3896              :             // We use fatal_err() below because after write_to_disk returns with success,
    3897              :             // the in-memory state of the filesystem already has the layer file in its final place,
    3898              :             // and subsequent pageserver code could think it's durable while it really isn't.
    3899          968 :             let timeline_dir = VirtualFile::open(
    3900          968 :                 &self_clone
    3901          968 :                     .conf
    3902          968 :                     .timeline_path(&self_clone.tenant_shard_id, &self_clone.timeline_id),
    3903          968 :                 &ctx,
    3904          968 :             )
    3905          486 :             .await
    3906          968 :             .fatal_err("VirtualFile::open for timeline dir fsync");
    3907          968 :             timeline_dir
    3908          968 :                 .sync_all()
    3909          484 :                 .await
    3910          968 :                 .fatal_err("VirtualFile::sync_all timeline dir");
    3911          968 :             anyhow::Ok(Some(new_delta))
    3912          968 :         };
    3913              :         // Before tokio-epoll-uring, we ran write_to_disk & the sync_all inside spawn_blocking.
    3914              :         // Preserve that, so the behavior stays the same for `virtual_file_io_engine=std-fs`.
    3915              :         use crate::virtual_file::io_engine::IoEngine;
    3916          968 :         match crate::virtual_file::io_engine::get() {
    3917            0 :             IoEngine::NotSet => panic!("io engine not set"),
    3918              :             IoEngine::StdFs => {
    3919          484 :                 let span = tracing::info_span!("blocking");
    3920          484 :                 tokio::task::spawn_blocking({
    3921          484 :                     move || Handle::current().block_on(work.instrument(span))
    3922          484 :                 })
    3923          484 :                 .await
    3924          484 :                 .context("spawn_blocking")
    3925          484 :                 .and_then(|x| x)
    3926              :             }
    3927              :             #[cfg(target_os = "linux")]
    3928        11215 :             IoEngine::TokioEpollUring => work.await,
    3929              :         }
    3930          968 :     }
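                      :
                      :     // Editor's note: a simplified, std-only sketch (hypothetical helper) of why
                      :     // `create_delta_layer` fsyncs the timeline *directory* after the layer file.
                      :     // On POSIX filesystems, fsyncing a file makes its contents durable, but the
                      :     // directory entry pointing at it lives in the directory and needs its own
                      :     // fsync to survive a crash.
                      :     fn example_write_durably(dir: &std::path::Path, name: &str, data: &[u8]) -> std::io::Result<()> {
                      :         use std::io::Write;
                      :         let mut file = std::fs::File::create(dir.join(name))?;
                      :         file.write_all(data)?;
                      :         // Durability point 1: the file contents and inode.
                      :         file.sync_all()?;
                      :         // Durability point 2: the link to the file, i.e. the directory itself.
                      :         std::fs::File::open(dir)?.sync_all()?;
                      :         Ok(())
                      :     }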
    3931              : 
    3932          540 :     async fn repartition(
    3933          540 :         &self,
    3934          540 :         lsn: Lsn,
    3935          540 :         partition_size: u64,
    3936          540 :         flags: EnumSet<CompactFlags>,
    3937          540 :         ctx: &RequestContext,
    3938          540 :     ) -> Result<((KeyPartitioning, SparseKeyPartitioning), Lsn), CompactionError> {
    3939          540 :         let Ok(mut partitioning_guard) = self.partitioning.try_lock() else {
    3940              :             // NB: there are two callers, one is the compaction task, of which there is only one per struct Tenant and hence Timeline.
    3941              :             // The other is the initdb optimization in flush_frozen_layer, used by `bootstrap_timeline`, which runs before `.activate()`
    3942              :             // and hence before the compaction task starts.
    3943            0 :             return Err(CompactionError::Other(anyhow!(
    3944            0 :                 "repartition() called concurrently, this should not happen"
    3945            0 :             )));
    3946              :         };
    3947          540 :         let ((dense_partition, sparse_partition), partition_lsn) = &*partitioning_guard;
    3948          540 :         if lsn < *partition_lsn {
    3949            0 :             return Err(CompactionError::Other(anyhow!(
    3950            0 :                 "repartition() called with LSN going backwards, this should not happen"
    3951            0 :             )));
    3952          540 :         }
    3953          540 : 
    3954          540 :         let distance = lsn.0 - partition_lsn.0;
    3955          540 :         if *partition_lsn != Lsn(0)
    3956          262 :             && distance <= self.repartition_threshold
    3957          262 :             && !flags.contains(CompactFlags::ForceRepartition)
    3958              :         {
    3959          248 :             debug!(
    3960              :                 distance,
    3961              :                 threshold = self.repartition_threshold,
    3962            0 :                 "no repartitioning needed"
    3963              :             );
    3964          248 :             return Ok((
    3965          248 :                 (dense_partition.clone(), sparse_partition.clone()),
    3966          248 :                 *partition_lsn,
    3967          248 :             ));
    3968          292 :         }
    3969              : 
    3970        15831 :         let (dense_ks, sparse_ks) = self.collect_keyspace(lsn, ctx).await?;
    3971          292 :         let dense_partitioning = dense_ks.partition(&self.shard_identity, partition_size);
    3972          292 :         let sparse_partitioning = SparseKeyPartitioning {
    3973          292 :             parts: vec![sparse_ks],
    3974          292 :         }; // no partitioning for metadata keys for now
    3975          292 :         *partitioning_guard = ((dense_partitioning, sparse_partitioning), lsn);
    3976          292 : 
    3977          292 :         Ok((partitioning_guard.0.clone(), partitioning_guard.1))
    3978          540 :     }
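                      :
                      :     // Editor's note: a hedged sketch (hypothetical helper) of the `try_lock`
                      :     // pattern `repartition` uses above: instead of silently waiting, a second
                      :     // concurrent caller is treated as a logic bug and turned into an error.
                      :     fn example_exclusive_section(state: &Mutex<u64>) -> anyhow::Result<u64> {
                      :         let Ok(mut guard) = state.try_lock() else {
                      :             // Concurrent entry indicates a bug upstream, so fail loudly.
                      :             bail!("called concurrently, this should not happen");
                      :         };
                      :         *guard += 1;
                      :         Ok(*guard)
                      :     }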
    3979              : 
    3980              :     // Is it time to create a new image layer for the given partition?
    3981           14 :     async fn time_for_new_image_layer(&self, partition: &KeySpace, lsn: Lsn) -> bool {
    3982           14 :         let threshold = self.get_image_creation_threshold();
    3983              : 
    3984           14 :         let guard = self.layers.read().await;
    3985           14 :         let Ok(layers) = guard.layer_map() else {
    3986            0 :             return false;
    3987              :         };
    3988              : 
    3989           14 :         let mut max_deltas = 0;
    3990           28 :         for part_range in &partition.ranges {
    3991           14 :             let image_coverage = layers.image_coverage(part_range, lsn);
    3992           28 :             for (img_range, last_img) in image_coverage {
    3993           14 :                 let img_lsn = if let Some(last_img) = last_img {
    3994            0 :                     last_img.get_lsn_range().end
    3995              :                 } else {
    3996           14 :                     Lsn(0)
    3997              :                 };
    3998              :                 // Let's consider an example:
    3999              :                 //
    4000              :                 // delta layer with LSN range 71-81
    4001              :                 // delta layer with LSN range 81-91
    4002              :                 // delta layer with LSN range 91-101
    4003              :                 // image layer at LSN 100
    4004              :                 //
    4005              :                 // If 'lsn' is still 100, i.e. no new WAL has been processed since the last image layer,
    4006              :                 // there's no need to create a new one. We check this case explicitly, to avoid passing
    4007              :                 // a bogus range to count_deltas below, with start > end. It's even possible that there
    4008              :                 // are some delta layers *later* than current 'lsn', if more WAL was processed and flushed
    4009              :                 // after we read last_record_lsn, which is passed here in the 'lsn' argument.
    4010           14 :                 if img_lsn < lsn {
    4011           14 :                     let num_deltas =
    4012           14 :                         layers.count_deltas(&img_range, &(img_lsn..lsn), Some(threshold));
    4013           14 : 
    4014           14 :                     max_deltas = max_deltas.max(num_deltas);
    4015           14 :                     if num_deltas >= threshold {
    4016            0 :                         debug!(
    4017            0 :                             "key range {}-{}, has {} deltas on this timeline in LSN range {}..{}",
    4018              :                             img_range.start, img_range.end, num_deltas, img_lsn, lsn
    4019              :                         );
    4020            0 :                         return true;
    4021           14 :                     }
    4022            0 :                 }
    4023              :             }
    4024              :         }
    4025              : 
    4026           14 :         debug!(
    4027              :             max_deltas,
    4028            0 :             "none of the partitioned ranges had >= {threshold} deltas"
    4029              :         );
    4030           14 :         false
    4031           14 :     }
    4032              : 
    4033              :     /// Create image layers for Postgres data. Assumes the caller passes a partition that is not too large,
    4034              :     /// so that at most one image layer will be produced from this function.
    4035          202 :     async fn create_image_layer_for_rel_blocks(
    4036          202 :         self: &Arc<Self>,
    4037          202 :         partition: &KeySpace,
    4038          202 :         mut image_layer_writer: ImageLayerWriter,
    4039          202 :         lsn: Lsn,
    4040          202 :         ctx: &RequestContext,
    4041          202 :         img_range: Range<Key>,
    4042          202 :         start: Key,
    4043          202 :     ) -> Result<ImageLayerCreationOutcome, CreateImageLayersError> {
    4044          202 :         let mut wrote_keys = false;
    4045          202 : 
    4046          202 :         let mut key_request_accum = KeySpaceAccum::new();
    4047         1344 :         for range in &partition.ranges {
    4048         1142 :             let mut key = range.start;
    4049         2472 :             while key < range.end {
    4050              :                 // Decide whether to retain this key: usually we do, but sharded tenants may
    4051              :                 // need to drop keys that don't belong to them. If we retain the key, add it
    4052              :                 // to `key_request_accum` so that we can later issue a vectored get
    4053         1330 :                 if self.shard_identity.is_key_disposable(&key) {
    4054            0 :                     debug!(
    4055            0 :                         "Dropping key {} during compaction (it belongs on shard {:?})",
    4056            0 :                         key,
    4057            0 :                         self.shard_identity.get_shard_number(&key)
    4058              :                     );
    4059         1330 :                 } else {
    4060         1330 :                     key_request_accum.add_key(key);
    4061         1330 :                 }
    4062              : 
    4063         1330 :                 let last_key_in_range = key.next() == range.end;
    4064         1330 :                 key = key.next();
    4065         1330 : 
    4066         1330 :                 // Maybe flush `key_request_accum`
    4067         1330 :                 if key_request_accum.raw_size() >= Timeline::MAX_GET_VECTORED_KEYS
    4068         1330 :                     || (last_key_in_range && key_request_accum.raw_size() > 0)
    4069              :                 {
    4070         1142 :                     let results = self
    4071         1142 :                         .get_vectored(key_request_accum.consume_keyspace(), lsn, ctx)
    4072           50 :                         .await?;
    4073              : 
    4074         1142 :                     if self.cancel.is_cancelled() {
    4075            0 :                         return Err(CreateImageLayersError::Cancelled);
    4076         1142 :                     }
    4077              : 
    4078         2472 :                     for (img_key, img) in results {
    4079         1330 :                         let img = match img {
    4080         1330 :                             Ok(img) => img,
    4081            0 :                             Err(err) => {
    4082            0 :                                 // If we fail to reconstruct a VM or FSM page, we can zero the
    4083            0 :                                 // page without losing any actual user data. That seems better
    4084            0 :                                 // than failing repeatedly and getting stuck.
    4085            0 :                                 //
    4086            0 :                                 // We had a bug at one point, where we truncated the FSM and VM
    4087            0 :                                 // in the pageserver, but Postgres didn't know about that
    4088            0 :                                 // and continued to generate incremental WAL records for pages
    4089            0 :                                 // that didn't exist in the pageserver. Trying to replay those
    4090            0 :                                 // WAL records failed to find the previous image of the page.
    4091            0 :                                 // This special case allows us to recover from that situation.
    4092            0 :                                 // See https://github.com/neondatabase/neon/issues/2601.
    4093            0 :                                 //
    4094            0 :                                 // Unfortunately we cannot do this for the main fork, or for
    4095            0 :                                 // any metadata keys, as that would lead to actual data
    4096            0 :                                 // loss.
    4097            0 :                                 if img_key.is_rel_fsm_block_key() || img_key.is_rel_vm_block_key() {
    4098            0 :                                     warn!("could not reconstruct FSM or VM key {img_key}, filling with zeros: {err:?}");
    4099            0 :                                     ZERO_PAGE.clone()
    4100              :                                 } else {
    4101            0 :                                     return Err(CreateImageLayersError::from(err));
    4102              :                                 }
    4103              :                             }
    4104              :                         };
    4105              : 
    4106              :                         // Write all the keys we just read into our new image layer.
    4107         1457 :                         image_layer_writer.put_image(img_key, img, ctx).await?;
    4108         1330 :                         wrote_keys = true;
    4109              :                     }
    4110          188 :                 }
    4111              :             }
    4112              :         }
    4113              : 
    4114          202 :         if wrote_keys {
    4115              :             // Normal path: we have written some data into the new image layer for this
    4116              :             // partition, so flush it to disk.
    4117          404 :             let (desc, path) = image_layer_writer.finish(ctx).await?;
    4118          202 :             let image_layer = Layer::finish_creating(self.conf, self, desc, &path)?;
    4119          202 :             info!("created image layer for rel {}", image_layer.local_path());
    4120          202 :             Ok(ImageLayerCreationOutcome {
    4121          202 :                 image: Some(image_layer),
    4122          202 :                 next_start_key: img_range.end,
    4123          202 :             })
    4124              :         } else {
    4125              :             // Special case: the image layer may be empty if this is a sharded tenant and the
    4126              :             // partition does not cover any keys owned by this shard.  In this case, to ensure
    4127              :             // we don't leave gaps between image layers, leave `start` where it is, so that the next
    4128              :             // layer we write will cover the key range that we just scanned.
    4129            0 :             tracing::debug!("no data in range {}-{}", img_range.start, img_range.end);
    4130            0 :             Ok(ImageLayerCreationOutcome {
    4131            0 :                 image: None,
    4132            0 :                 next_start_key: start,
    4133            0 :             })
    4134              :         }
    4135          202 :     }
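                      :
                      :     // Editor's note: the key-batching pattern above, reduced to a hedged,
                      :     // self-contained sketch with the pageserver types replaced by a plain Vec
                      :     // (all names hypothetical). Keys are accumulated until a batch cap is hit
                      :     // (or the input ends) and then handed to a vectored getter in one call,
                      :     // amortizing per-request overhead.
                      :     fn example_batched_reads(
                      :         keys: impl Iterator<Item = u64>,
                      :         max_batch: usize,
                      :         mut get_many: impl FnMut(&[u64]) -> Vec<(u64, Vec<u8>)>,
                      :         mut on_value: impl FnMut(u64, Vec<u8>),
                      :     ) {
                      :         let mut batch: Vec<u64> = Vec::with_capacity(max_batch);
                      :         for key in keys {
                      :             batch.push(key);
                      :             if batch.len() >= max_batch {
                      :                 for (k, v) in get_many(&batch) {
                      :                     on_value(k, v);
                      :                 }
                      :                 batch.clear();
                      :             }
                      :         }
                      :         // Flush the final, partially filled batch.
                      :         if !batch.is_empty() {
                      :             for (k, v) in get_many(&batch) {
                      :                 on_value(k, v);
                      :             }
                      :         }
                      :     }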
    4136              : 
    4137              :     /// Create an image layer for metadata keys. This function produces one image layer for all metadata
    4138              :     /// keys for now. Because metadata keys cannot exceed the basebackup size limit, the resulting
    4139              :     /// image layer will not be too large.
    4140              :     #[allow(clippy::too_many_arguments)]
    4141          192 :     async fn create_image_layer_for_metadata_keys(
    4142          192 :         self: &Arc<Self>,
    4143          192 :         partition: &KeySpace,
    4144          192 :         mut image_layer_writer: ImageLayerWriter,
    4145          192 :         lsn: Lsn,
    4146          192 :         ctx: &RequestContext,
    4147          192 :         img_range: Range<Key>,
    4148          192 :         mode: ImageLayerCreationMode,
    4149          192 :         start: Key,
    4150          192 :     ) -> Result<ImageLayerCreationOutcome, CreateImageLayersError> {
    4151          192 :         // Metadata keys image layer creation.
    4152          192 :         let mut reconstruct_state = ValuesReconstructState::default();
    4153          192 :         let begin = Instant::now();
    4154          192 :         let data = self
    4155          192 :             .get_vectored_impl(partition.clone(), lsn, &mut reconstruct_state, ctx)
    4156         1025 :             .await?;
    4157          192 :         let (data, total_kb_retrieved, total_keys_retrieved) = {
    4158          192 :             let mut new_data = BTreeMap::new();
    4159          192 :             let mut total_kb_retrieved = 0;
    4160          192 :             let mut total_keys_retrieved = 0;
    4161        10204 :             for (k, v) in data {
    4162        10012 :                 let v = v?;
    4163        10012 :                 total_kb_retrieved += KEY_SIZE + v.len();
    4164        10012 :                 total_keys_retrieved += 1;
    4165        10012 :                 new_data.insert(k, v);
    4166              :             }
    4167          192 :             (new_data, total_kb_retrieved / 1024, total_keys_retrieved)
    4168          192 :         };
    4169          192 :         let delta_files_accessed = reconstruct_state.get_delta_layers_visited();
    4170          192 :         let elapsed = begin.elapsed();
    4171          192 : 
    4172          192 :         let trigger_generation = delta_files_accessed as usize >= MAX_AUX_FILE_V2_DELTAS;
    4173          192 :         info!(
    4174            0 :             "metadata key compaction: trigger_generation={trigger_generation}, delta_files_accessed={delta_files_accessed}, total_kb_retrieved={total_kb_retrieved}, total_keys_retrieved={total_keys_retrieved}, read_time={}s", elapsed.as_secs_f64()
    4175              :         );
    4176              : 
    4177          192 :         if !trigger_generation && mode == ImageLayerCreationMode::Try {
    4178            2 :             return Ok(ImageLayerCreationOutcome {
    4179            2 :                 image: None,
    4180            2 :                 next_start_key: img_range.end,
    4181            2 :             });
    4182          190 :         }
    4183          190 :         if self.cancel.is_cancelled() {
    4184            0 :             return Err(CreateImageLayersError::Cancelled);
    4185          190 :         }
    4186          190 :         let mut wrote_any_image = false;
    4187        10202 :         for (k, v) in data {
    4188        10012 :             if v.is_empty() {
    4189              :                 // the key has been deleted, it does not need an image
    4190              :                 // in metadata keyspace, an empty image == tombstone
    4191            8 :                 continue;
    4192        10004 :             }
    4193        10004 :             wrote_any_image = true;
    4194        10004 : 
    4195        10004 :             // No need to handle sharding b/c metadata keys are always on the 0-th shard.
    4196        10004 : 
    4197        10004 :             // TODO: split image layers to avoid overly large layer files. Overly large image files
    4198        10004 :             // are not handled on the normal data path either.
    4199        10160 :             image_layer_writer.put_image(k, v, ctx).await?;
    4200              :         }
    4201              : 
    4202          190 :         if wrote_any_image {
    4203              :             // Normal path: we have written some data into the new image layer for this
    4204              :             // partition, so flush it to disk.
    4205           25 :             let (desc, path) = image_layer_writer.finish(ctx).await?;
    4206           12 :             let image_layer = Layer::finish_creating(self.conf, self, desc, &path)?;
    4207           12 :             info!(
    4208            0 :                 "created image layer for metadata {}",
    4209            0 :                 image_layer.local_path()
    4210              :             );
    4211           12 :             Ok(ImageLayerCreationOutcome {
    4212           12 :                 image: Some(image_layer),
    4213           12 :                 next_start_key: img_range.end,
    4214           12 :             })
    4215              :         } else {
    4216              :             // Special case: the image layer may be empty if this is a sharded tenant and the
    4217              :             // partition does not cover any keys owned by this shard. In this case, to ensure
    4218              :             // we don't leave gaps between image layers, leave `start` where it is, so that the next
    4219              :             // layer we write will cover the key range that we just scanned.
    4220          178 :             tracing::debug!("no data in range {}-{}", img_range.start, img_range.end);
    4221          178 :             Ok(ImageLayerCreationOutcome {
    4222          178 :                 image: None,
    4223          178 :                 next_start_key: start,
    4224          178 :             })
    4225              :         }
    4226          192 :     }
    4227              : 
    4228              :     /// Predicate function which indicates whether we should check if new image layers
    4229              :     /// are required. Since checking if new image layers are required is expensive in
    4230              :     /// terms of CPU, we only do it in the following cases:
    4231              :     /// 1. If the timeline has ingested sufficient WAL to justify the cost
    4232              :     /// 2. If enough time has passed since the last check:
    4233              :     ///     1. For large tenants, we wish to perform the check more often since they
    4234              :     ///        suffer from the lack of image layers
    4235              :     ///     2. For small tenants (that can mostly fit in RAM), we use a much longer interval
    4236          716 :     fn should_check_if_image_layers_required(self: &Arc<Timeline>, lsn: Lsn) -> bool {
    4237              :         const LARGE_TENANT_THRESHOLD: u64 = 2 * 1024 * 1024 * 1024;
    4238              : 
    4239          716 :         let last_checks_at = self.last_image_layer_creation_check_at.load();
    4240          716 :         let distance = lsn
    4241          716 :             .checked_sub(last_checks_at)
    4242          716 :             .expect("Attempt to compact with LSN going backwards");
    4243          716 :         let min_distance =
    4244          716 :             self.get_image_layer_creation_check_threshold() as u64 * self.get_checkpoint_distance();
    4245          716 : 
    4246          716 :         let distance_based_decision = distance.0 >= min_distance;
    4247          716 : 
    4248          716 :         let mut time_based_decision = false;
    4249          716 :         let mut last_check_instant = self.last_image_layer_creation_check_instant.lock().unwrap();
    4250          716 :         if let CurrentLogicalSize::Exact(logical_size) = self.current_logical_size.current_size() {
    4251          614 :             let check_required_after = if Into::<u64>::into(&logical_size) >= LARGE_TENANT_THRESHOLD
    4252              :             {
    4253            0 :                 self.get_checkpoint_timeout()
    4254              :             } else {
    4255          614 :                 Duration::from_secs(3600 * 48)
    4256              :             };
    4257              : 
    4258          614 :             time_based_decision = match *last_check_instant {
    4259          438 :                 Some(last_check) => {
    4260          438 :                     let elapsed = last_check.elapsed();
    4261          438 :                     elapsed >= check_required_after
    4262              :                 }
    4263          176 :                 None => true,
    4264              :             };
    4265          102 :         }
    4266              : 
    4267              :         // Do the expensive delta layer counting only if this timeline has ingested sufficient
    4268              :         // WAL since the last check or a checkpoint timeout interval has elapsed since the last
    4269              :         // check.
    4270          716 :         let decision = distance_based_decision || time_based_decision;
    4271              : 
    4272          716 :         if decision {
    4273          178 :             self.last_image_layer_creation_check_at.store(lsn);
    4274          178 :             *last_check_instant = Some(Instant::now());
    4275          538 :         }
    4276              : 
    4277          716 :         decision
    4278          716 :     }
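                      :
                      :     // Editor's note: the hybrid trigger above in miniature (hypothetical
                      :     // helper): run the expensive check when either enough WAL distance has
                      :     // accumulated or enough wall-clock time has passed, and reset both
                      :     // baselines only when the check actually runs.
                      :     fn example_should_check(
                      :         last_lsn: &mut u64,
                      :         last_check: &mut Option<Instant>,
                      :         lsn: u64,
                      :         min_distance: u64,
                      :         max_age: Duration,
                      :     ) -> bool {
                      :         let distance_based = lsn.saturating_sub(*last_lsn) >= min_distance;
                      :         let time_based = match *last_check {
                      :             Some(at) => at.elapsed() >= max_age,
                      :             None => true, // never checked before: run once eagerly
                      :         };
                      :         let decision = distance_based || time_based;
                      :         if decision {
                      :             *last_lsn = lsn;
                      :             *last_check = Some(Instant::now());
                      :         }
                      :         decision
                      :     }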
    4279              : 
    4280          716 :     #[tracing::instrument(skip_all, fields(%lsn, %mode))]
    4281              :     async fn create_image_layers(
    4282              :         self: &Arc<Timeline>,
    4283              :         partitioning: &KeyPartitioning,
    4284              :         lsn: Lsn,
    4285              :         mode: ImageLayerCreationMode,
    4286              :         ctx: &RequestContext,
    4287              :     ) -> Result<Vec<ResidentLayer>, CreateImageLayersError> {
    4288              :         let timer = self.metrics.create_images_time_histo.start_timer();
    4289              :         let mut image_layers = Vec::new();
    4290              : 
    4291              :         // We need to avoid holes between generated image layers.
    4292              :         // Otherwise LayerMap::image_layer_exists will return false if key range of some layer is covered by more than one
    4293              :         // Otherwise LayerMap::image_layer_exists will return false if the key range of some layer is covered by more
    4294              :         // than one image layer with a hole between them. In that case such a layer cannot be utilized by GC.
    4295              :         //
    4296              :         // How can such a hole between partitions appear?
    4297              :         // If we have a relation with relid=1 and size 100 and a relation with relid=2 and size 200, then the result of
    4298              :         // KeySpace::partition may contain partitions <100000000..100000099> and <200000000..200000199>.
    4299              :         // If there is a delta layer <100000000..300000000>, then it will never be garbage collected, because the
    4300              :         // image layers <100000000..100000099> and <200000000..200000199> do not completely cover it.
    4301              : 
    4302              :         let check_for_image_layers = self.should_check_if_image_layers_required(lsn);
    4303              : 
    4304              :         for partition in partitioning.parts.iter() {
    4305              :             if self.cancel.is_cancelled() {
    4306              :                 return Err(CreateImageLayersError::Cancelled);
    4307              :             }
    4308              : 
    4309              :             let img_range = start..partition.ranges.last().unwrap().end;
    4310              :             let compact_metadata = partition.overlaps(&Key::metadata_key_range());
    4311              :             if compact_metadata {
    4312              :                 for range in &partition.ranges {
    4313              :                     assert!(
    4314              :                         range.start.field1 >= METADATA_KEY_BEGIN_PREFIX
    4315              :                             && range.end.field1 <= METADATA_KEY_END_PREFIX,
    4316              :                         "metadata keys must be partitioned separately"
    4317              :                     );
    4318              :                 }
    4319              :                 if mode == ImageLayerCreationMode::Try && !check_for_image_layers {
    4320              :                     // Skip compaction if there are not enough updates. Metadata compaction will do a scan and
    4321              :                     // might mess up with evictions.
    4322              :                     start = img_range.end;
    4323              :                     continue;
    4324              :                 }
    4325              :                 // For initial and force modes, we always generate image layers for metadata keys.
    4326              :             } else if let ImageLayerCreationMode::Try = mode {
    4327              :                 // check_for_image_layers = false -> skip
    4328              :                 // check_for_image_layers = true -> check time_for_new_image_layer -> skip/generate
    4329              :                 if !check_for_image_layers || !self.time_for_new_image_layer(partition, lsn).await {
    4330              :                     start = img_range.end;
    4331              :                     continue;
    4332              :                 }
    4333              :             }
    4334              :             if let ImageLayerCreationMode::Force = mode {
    4335              :                 // When forced to create image layers, we might try and create them where they already
    4336              :                 // exist.  This mode is only used in tests/debug.
    4337              :                 let layers = self.layers.read().await;
    4338              :                 if layers.contains_key(&PersistentLayerKey {
    4339              :                     key_range: img_range.clone(),
    4340              :                     lsn_range: PersistentLayerDesc::image_layer_lsn_range(lsn),
    4341              :                     is_delta: false,
    4342              :                 }) {
    4343              :                     tracing::info!(
    4344              :                         "Skipping image layer at {lsn} {}..{}, already exists",
    4345              :                         img_range.start,
    4346              :                         img_range.end
    4347              :                     );
    4348              :                     start = img_range.end;
    4349              :                     continue;
    4350              :                 }
    4351              :             }
    4352              : 
    4353              :             let image_layer_writer = ImageLayerWriter::new(
    4354              :                 self.conf,
    4355              :                 self.timeline_id,
    4356              :                 self.tenant_shard_id,
    4357              :                 &img_range,
    4358              :                 lsn,
    4359              :                 ctx,
    4360              :             )
    4361              :             .await?;
    4362              : 
    4363            0 :             fail_point!("image-layer-writer-fail-before-finish", |_| {
    4364            0 :                 Err(CreateImageLayersError::Other(anyhow::anyhow!(
    4365            0 :                     "failpoint image-layer-writer-fail-before-finish"
    4366            0 :                 )))
    4367            0 :             });
    4368              : 
    4369              :             if !compact_metadata {
    4370              :                 let ImageLayerCreationOutcome {
    4371              :                     image,
    4372              :                     next_start_key,
    4373              :                 } = self
    4374              :                     .create_image_layer_for_rel_blocks(
    4375              :                         partition,
    4376              :                         image_layer_writer,
    4377              :                         lsn,
    4378              :                         ctx,
    4379              :                         img_range,
    4380              :                         start,
    4381              :                     )
    4382              :                     .await?;
    4383              : 
    4384              :                 start = next_start_key;
    4385              :                 image_layers.extend(image);
    4386              :             } else {
    4387              :                 let ImageLayerCreationOutcome {
    4388              :                     image,
    4389              :                     next_start_key,
    4390              :                 } = self
    4391              :                     .create_image_layer_for_metadata_keys(
    4392              :                         partition,
    4393              :                         image_layer_writer,
    4394              :                         lsn,
    4395              :                         ctx,
    4396              :                         img_range,
    4397              :                         mode,
    4398              :                         start,
    4399              :                     )
    4400              :                     .await?;
    4401              :                 start = next_start_key;
    4402              :                 image_layers.extend(image);
    4403              :             }
    4404              :         }
    4405              : 
    4406              :         let mut guard = self.layers.write().await;
    4407              : 
    4408              :         // FIXME: we could add the images to be uploaded *before* returning from here, but right
    4409              :         // now they are being scheduled outside of the write lock; the current way is
    4410              :         // inconsistent with the compaction lock order.
    4411              :         guard
    4412              :             .open_mut()?
    4413              :             .track_new_image_layers(&image_layers, &self.metrics);
    4414              :         drop_wlock(guard);
    4415              :         timer.stop_and_record();
    4416              : 
    4417              :         // Creating image layers may have caused some previously visible layers to be covered
    4418              :         if !image_layers.is_empty() {
    4419              :             self.update_layer_visibility().await?;
    4420              :         }
    4421              : 
    4422              :         Ok(image_layers)
    4423              :     }
    4424              : 
    4425              :     /// Wait until the background initial logical size calculation is complete, or
    4426              :     /// this Timeline is shut down.  Calling this function will cause the initial
    4427              :     /// logical size calculation to skip waiting for the background jobs barrier.
    4428            0 :     pub(crate) async fn await_initial_logical_size(self: Arc<Self>) {
    4429            0 :         if !self.shard_identity.is_shard_zero() {
    4430              :             // We don't populate logical size on shard >0: skip waiting for it.
    4431            0 :             return;
    4432            0 :         }
    4433            0 : 
    4434            0 :         if self.remote_client.is_deleting() {
    4435              :             // The timeline was created in a deletion-resume state, we don't expect logical size to be populated
    4436            0 :             return;
    4437            0 :         }
    4438            0 : 
    4439            0 :         if self.current_logical_size.current_size().is_exact() {
    4440              :             // root timelines are initialized with exact count, but never start the background
    4441              :             // calculation
    4442            0 :             return;
    4443            0 :         }
    4444              : 
    4445            0 :         if let Some(await_bg_cancel) = self
    4446            0 :             .current_logical_size
    4447            0 :             .cancel_wait_for_background_loop_concurrency_limit_semaphore
    4448            0 :             .get()
    4449            0 :         {
    4450            0 :             await_bg_cancel.cancel();
    4451            0 :         } else {
    4452              :             // We should not wait if we were not able to explicitly instruct
    4453              :             // the logical size cancellation to skip the concurrency limit semaphore.
    4454              :             // TODO: this is an unexpected case.  We should restructure so that it
    4455              :             // can't happen.
    4456            0 :             tracing::warn!(
    4457            0 :                 "await_initial_logical_size: can't get semaphore cancel token, skipping"
    4458              :             );
    4459            0 :             debug_assert!(false);
    4460              :         }
    4461              : 
    4462            0 :         tokio::select!(
    4463            0 :             _ = self.current_logical_size.initialized.acquire() => {},
    4464            0 :             _ = self.cancel.cancelled() => {}
    4465              :         )
    4466            0 :     }
    4467              : 
    4468              :     /// Detach this timeline from its ancestor by copying all of the ancestor's layers as this
    4469              :     /// Timeline's layers up to the ancestor_lsn.
    4470              :     ///
    4471              :     /// Requires a timeline that:
    4472              :     /// - has an ancestor to detach from
    4473              :     /// - the ancestor does not have an ancestor -- follows from the original RFC limitations, not
    4474              :     ///   a technical requirement
    4475              :     ///
    4476              :     /// After the operation has been started, it cannot be canceled. Upon restart it needs to be
    4477              :     /// polled again until completion.
    4478              :     ///
    4479              :     /// During the operation all timelines sharing the data with this timeline will be reparented
    4480              :     /// from our ancestor to be branches of this timeline.
    4481            0 :     pub(crate) async fn prepare_to_detach_from_ancestor(
    4482            0 :         self: &Arc<Timeline>,
    4483            0 :         tenant: &crate::tenant::Tenant,
    4484            0 :         options: detach_ancestor::Options,
    4485            0 :         ctx: &RequestContext,
    4486            0 :     ) -> Result<detach_ancestor::Progress, detach_ancestor::Error> {
    4487            0 :         detach_ancestor::prepare(self, tenant, options, ctx).await
    4488            0 :     }
    4489              : 
    4490              :     /// Second step of detach from ancestor; detaches `self` from its current ancestor and
    4491              :     /// reparents any reparentable children of the previous ancestor.
    4492              :     ///
    4493              :     /// This method is to be called while holding the TenantManager's tenant slot, so during this
    4494              :     /// method we cannot be deleted nor can any timeline be deleted. After this method returns
    4495              :     /// successfully, tenant must be reloaded.
    4496              :     ///
    4497              :     /// Final step will be to [`Self::complete_detaching_timeline_ancestor`] after optionally
    4498              :     /// resetting the tenant.
    4499            0 :     pub(crate) async fn detach_from_ancestor_and_reparent(
    4500            0 :         self: &Arc<Timeline>,
    4501            0 :         tenant: &crate::tenant::Tenant,
    4502            0 :         prepared: detach_ancestor::PreparedTimelineDetach,
    4503            0 :         ctx: &RequestContext,
    4504            0 :     ) -> Result<detach_ancestor::DetachingAndReparenting, detach_ancestor::Error> {
    4505            0 :         detach_ancestor::detach_and_reparent(self, tenant, prepared, ctx).await
    4506            0 :     }
    4507              : 
    4508              :     /// Final step which unblocks the GC.
    4509              :     ///
    4510              :     /// The tenant must've been reset if ancestry was modified previously (in tenant manager).
    4511            0 :     pub(crate) async fn complete_detaching_timeline_ancestor(
    4512            0 :         self: &Arc<Timeline>,
    4513            0 :         tenant: &crate::tenant::Tenant,
    4514            0 :         attempt: detach_ancestor::Attempt,
    4515            0 :         ctx: &RequestContext,
    4516            0 :     ) -> Result<(), detach_ancestor::Error> {
    4517            0 :         detach_ancestor::complete(self, tenant, attempt, ctx).await
    4518            0 :     }
    4519              : }
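                      : 
                      : // A minimal sketch (not part of this file) of how a caller might drive the three
                      : // detach-ancestor steps above. The `Progress::Prepared` variant name and the
                      : // origin of `tenant`, `options`, and `ctx` are assumptions here, since
                      : // `detach_ancestor::Progress` is defined in another module:
                      : //
                      : //     let progress = timeline
                      : //         .prepare_to_detach_from_ancestor(&tenant, options, &ctx)
                      : //         .await?;
                      : //     if let detach_ancestor::Progress::Prepared(attempt, prepared) = progress {
                      : //         timeline
                      : //             .detach_from_ancestor_and_reparent(&tenant, prepared, &ctx)
                      : //             .await?;
                      : //         // ... reset/reload the tenant here if ancestry was modified ...
                      : //         timeline
                      : //             .complete_detaching_timeline_ancestor(&tenant, attempt, &ctx)
                      : //             .await?;
                      : //     }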
    4520              : 
    4521              : impl Drop for Timeline {
    4522           10 :     fn drop(&mut self) {
    4523           10 :         if let Some(ancestor) = &self.ancestor_timeline {
    4524              :             // This lock should never be poisoned, but in case it is we do a .map() instead of
    4525              :             // an unwrap(), to avoid panicking in a destructor and thereby aborting the process.
    4526            4 :             if let Ok(mut gc_info) = ancestor.gc_info.write() {
    4527            4 :                 if !gc_info.remove_child_not_offloaded(self.timeline_id) {
    4528            0 :                     tracing::error!(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id,
    4529            0 :                         "Couldn't remove retain_lsn entry from offloaded timeline's parent: already removed");
    4530            4 :                 }
    4531            0 :             }
    4532            6 :         }
    4533           10 :     }
    4534              : }
    4535              : 
    4536              : /// Top-level failure to compact.
    4537            0 : #[derive(Debug, thiserror::Error)]
    4538              : pub(crate) enum CompactionError {
    4539              :     #[error("The timeline or pageserver is shutting down")]
    4540              :     ShuttingDown,
    4541              :     /// Compaction tried to offload a timeline and failed
    4542              :     #[error("Failed to offload timeline: {0}")]
    4543              :     Offload(OffloadError),
    4544              :     /// Compaction cannot be done right now; e.g. due to page reconstruction errors and the like.
    4545              :     #[error(transparent)]
    4546              :     Other(anyhow::Error),
    4547              : }
    4548              : 
    4549              : impl From<OffloadError> for CompactionError {
    4550            0 :     fn from(e: OffloadError) -> Self {
    4551            0 :         match e {
    4552            0 :             OffloadError::Cancelled => Self::ShuttingDown,
    4553            0 :             _ => Self::Offload(e),
    4554              :         }
    4555            0 :     }
    4556              : }
    4557              : 
    4558              : impl CompactionError {
    4559            0 :     pub fn is_cancelled(&self) -> bool {
    4560            0 :         matches!(self, CompactionError::ShuttingDown)
    4561            0 :     }
    4562              : }
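                      : 
                      : // A minimal sketch (with a hypothetical `run_compaction` helper, not from this
                      : // file) of how callers typically triage compaction errors: cancellation is
                      : // expected during shutdown and is not reported as a failure.
                      : //
                      : //     match run_compaction().await {
                      : //         Ok(()) => {}
                      : //         Err(e) if e.is_cancelled() => { /* shutting down; not an error */ }
                      : //         Err(e) => tracing::error!("compaction failed: {e:#}"),
                      : //     }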
    4563              : 
    4564              : impl From<CollectKeySpaceError> for CompactionError {
    4565            0 :     fn from(err: CollectKeySpaceError) -> Self {
    4566            0 :         match err {
    4567              :             CollectKeySpaceError::Cancelled
    4568              :             | CollectKeySpaceError::PageRead(PageReconstructError::Cancelled) => {
    4569            0 :                 CompactionError::ShuttingDown
    4570              :             }
    4571            0 :             e => CompactionError::Other(e.into()),
    4572              :         }
    4573            0 :     }
    4574              : }
    4575              : 
    4576              : impl From<super::upload_queue::NotInitialized> for CompactionError {
    4577            0 :     fn from(value: super::upload_queue::NotInitialized) -> Self {
    4578            0 :         match value {
    4579              :             super::upload_queue::NotInitialized::Uninitialized => {
    4580            0 :                 CompactionError::Other(anyhow::anyhow!(value))
    4581              :             }
    4582              :             super::upload_queue::NotInitialized::ShuttingDown
    4583            0 :             | super::upload_queue::NotInitialized::Stopped => CompactionError::ShuttingDown,
    4584              :         }
    4585            0 :     }
    4586              : }
    4587              : 
    4588              : impl From<super::storage_layer::layer::DownloadError> for CompactionError {
    4589            0 :     fn from(e: super::storage_layer::layer::DownloadError) -> Self {
    4590            0 :         match e {
    4591              :             super::storage_layer::layer::DownloadError::TimelineShutdown
    4592              :             | super::storage_layer::layer::DownloadError::DownloadCancelled => {
    4593            0 :                 CompactionError::ShuttingDown
    4594              :             }
    4595              :             super::storage_layer::layer::DownloadError::ContextAndConfigReallyDeniesDownloads
    4596              :             | super::storage_layer::layer::DownloadError::DownloadRequired
    4597              :             | super::storage_layer::layer::DownloadError::NotFile(_)
    4598              :             | super::storage_layer::layer::DownloadError::DownloadFailed
    4599              :             | super::storage_layer::layer::DownloadError::PreStatFailed(_) => {
    4600            0 :                 CompactionError::Other(anyhow::anyhow!(e))
    4601              :             }
    4602              :             #[cfg(test)]
    4603              :             super::storage_layer::layer::DownloadError::Failpoint(_) => {
    4604            0 :                 CompactionError::Other(anyhow::anyhow!(e))
    4605              :             }
    4606              :         }
    4607            0 :     }
    4608              : }
    4609              : 
    4610              : impl From<layer_manager::Shutdown> for CompactionError {
    4611            0 :     fn from(_: layer_manager::Shutdown) -> Self {
    4612            0 :         CompactionError::ShuttingDown
    4613            0 :     }
    4614              : }
    4615              : 
    4616              : #[serde_as]
    4617          196 : #[derive(serde::Serialize)]
    4618              : struct RecordedDuration(#[serde_as(as = "serde_with::DurationMicroSeconds")] Duration);
    4619              : 
    4620              : #[derive(Default)]
    4621              : enum DurationRecorder {
    4622              :     #[default]
    4623              :     NotStarted,
    4624              :     Recorded(RecordedDuration, tokio::time::Instant),
    4625              : }
    4626              : 
    4627              : impl DurationRecorder {
    4628          504 :     fn till_now(&self) -> DurationRecorder {
    4629          504 :         match self {
    4630              :             DurationRecorder::NotStarted => {
    4631            0 :                 panic!("must only call on recorded measurements")
    4632              :             }
    4633          504 :             DurationRecorder::Recorded(_, ended) => {
    4634          504 :                 let now = tokio::time::Instant::now();
    4635          504 :                 DurationRecorder::Recorded(RecordedDuration(now - *ended), now)
    4636          504 :             }
    4637          504 :         }
    4638          504 :     }
    4639          196 :     fn into_recorded(self) -> Option<RecordedDuration> {
    4640          196 :         match self {
    4641            0 :             DurationRecorder::NotStarted => None,
    4642          196 :             DurationRecorder::Recorded(recorded, _) => Some(recorded),
    4643              :         }
    4644          196 :     }
    4645              : }
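                      : 
                      : // A minimal usage sketch (assumed, not taken from this file): successive phases
                      : // are timed by seeding a `Recorded` value and calling `till_now()` after each
                      : // phase; `into_recorded()` then extracts the measured duration for reporting.
                      : //
                      : //     let mut recorder = DurationRecorder::Recorded(
                      : //         RecordedDuration(Duration::ZERO),
                      : //         tokio::time::Instant::now(),
                      : //     );
                      : //     run_phase_one(); // hypothetical unit of work
                      : //     recorder = recorder.till_now(); // elapsed time of phase one
                      : //     let elapsed = recorder.into_recorded(); // Some(RecordedDuration(..))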
    4646              : 
    4647              : /// Descriptor for a delta layer used in testing infra. The start/end key/lsn range of the
    4648              : /// delta layer might be different from the min/max key/lsn in the delta layer. Therefore,
    4649              : /// the layer descriptor requires the user to provide the ranges, which should cover all
    4650              : /// keys specified in the `data` field.
    4651              : #[cfg(test)]
    4652              : #[derive(Clone)]
    4653              : pub struct DeltaLayerTestDesc {
    4654              :     pub lsn_range: Range<Lsn>,
    4655              :     pub key_range: Range<Key>,
    4656              :     pub data: Vec<(Key, Lsn, Value)>,
    4657              : }
    4658              : 
    4659              : #[cfg(test)]
    4660              : impl DeltaLayerTestDesc {
    4661            2 :     pub fn new(lsn_range: Range<Lsn>, key_range: Range<Key>, data: Vec<(Key, Lsn, Value)>) -> Self {
    4662            2 :         Self {
    4663            2 :             lsn_range,
    4664            2 :             key_range,
    4665            2 :             data,
    4666            2 :         }
    4667            2 :     }
    4668              : 
    4669           72 :     pub fn new_with_inferred_key_range(
    4670           72 :         lsn_range: Range<Lsn>,
    4671           72 :         data: Vec<(Key, Lsn, Value)>,
    4672           72 :     ) -> Self {
    4673          196 :         let key_min = data.iter().map(|(key, _, _)| key).min().unwrap();
    4674          196 :         let key_max = data.iter().map(|(key, _, _)| key).max().unwrap();
    4675           72 :         Self {
    4676           72 :             key_range: (*key_min)..(key_max.next()),
    4677           72 :             lsn_range,
    4678           72 :             data,
    4679           72 :         }
    4680           72 :     }
    4681              : 
    4682           10 :     pub(crate) fn layer_name(&self) -> LayerName {
    4683           10 :         LayerName::Delta(super::storage_layer::DeltaLayerName {
    4684           10 :             key_range: self.key_range.clone(),
    4685           10 :             lsn_range: self.lsn_range.clone(),
    4686           10 :         })
    4687           10 :     }
    4688              : }
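                      : 
                      : // A minimal test-infra sketch (the keys, LSNs, and values are hypothetical):
                      : // `new_with_inferred_key_range` derives the key range from the minimum key and
                      : // the successor of the maximum key found in `data`:
                      : //
                      : //     let data = vec![
                      : //         (key_a, Lsn(0x10), value_a),
                      : //         (key_b, Lsn(0x20), value_b),
                      : //     ];
                      : //     let desc =
                      : //         DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x10)..Lsn(0x30), data);
                      : //     let name = desc.layer_name(); // LayerName::Delta over the inferred range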
    4689              : 
    4690              : impl Timeline {
    4691           28 :     async fn finish_compact_batch(
    4692           28 :         self: &Arc<Self>,
    4693           28 :         new_deltas: &[ResidentLayer],
    4694           28 :         new_images: &[ResidentLayer],
    4695           28 :         layers_to_remove: &[Layer],
    4696           28 :     ) -> Result<(), CompactionError> {
    4697           28 :         let mut guard = tokio::select! {
    4698           28 :             guard = self.layers.write() => guard,
    4699           28 :             _ = self.cancel.cancelled() => {
    4700            0 :                 return Err(CompactionError::ShuttingDown);
    4701              :             }
    4702              :         };
    4703              : 
    4704           28 :         let mut duplicated_layers = HashSet::new();
    4705           28 : 
    4706           28 :         let mut insert_layers = Vec::with_capacity(new_deltas.len());
    4707              : 
    4708          336 :         for l in new_deltas {
    4709          308 :             if guard.contains(l.as_ref()) {
    4710              :                 // expected in tests
    4711            0 :                 tracing::error!(layer=%l, "duplicated L1 layer");
    4712              : 
    4713              :                 // good ways to cause a duplicate: we repeatedly error after taking the writelock
    4714              :                 // `guard` on self.layers. As of this writing, there are no error returns except
    4715              :                 // for compact_level0_phase1 creating an L0, which does not happen in practice
    4716              :                 // because we have not implemented L0 => L0 compaction.
    4717            0 :                 duplicated_layers.insert(l.layer_desc().key());
    4718          308 :             } else if LayerMap::is_l0(&l.layer_desc().key_range, l.layer_desc().is_delta) {
    4719            0 :                 return Err(CompactionError::Other(anyhow::anyhow!("compaction generates a L0 layer file as output, which will cause infinite compaction.")));
    4720          308 :             } else {
    4721          308 :                 insert_layers.push(l.clone());
    4722          308 :             }
    4723              :         }
    4724              : 
    4725              :         // only remove those inputs which were not outputs
    4726           28 :         let remove_layers: Vec<Layer> = layers_to_remove
    4727           28 :             .iter()
    4728          402 :             .filter(|l| !duplicated_layers.contains(&l.layer_desc().key()))
    4729           28 :             .cloned()
    4730           28 :             .collect();
    4731           28 : 
    4732           28 :         if !new_images.is_empty() {
    4733            0 :             guard
    4734            0 :                 .open_mut()?
    4735            0 :                 .track_new_image_layers(new_images, &self.metrics);
    4736           28 :         }
    4737              : 
    4738           28 :         guard
    4739           28 :             .open_mut()?
    4740           28 :             .finish_compact_l0(&remove_layers, &insert_layers, &self.metrics);
    4741           28 : 
    4742           28 :         self.remote_client
    4743           28 :             .schedule_compaction_update(&remove_layers, new_deltas)?;
    4744              : 
    4745           28 :         drop_wlock(guard);
    4746           28 : 
    4747           28 :         Ok(())
    4748           28 :     }
    4749              : 
    4750            0 :     async fn rewrite_layers(
    4751            0 :         self: &Arc<Self>,
    4752            0 :         mut replace_layers: Vec<(Layer, ResidentLayer)>,
    4753            0 :         mut drop_layers: Vec<Layer>,
    4754            0 :     ) -> Result<(), CompactionError> {
    4755            0 :         let mut guard = self.layers.write().await;
    4756              : 
    4757              :         // Trim our lists in case our caller (compaction) raced with someone else (GC) removing layers: we want
    4758              :         // to avoid double-removing, and avoid rewriting something that was removed.
    4759            0 :         replace_layers.retain(|(l, _)| guard.contains(l));
    4760            0 :         drop_layers.retain(|l| guard.contains(l));
    4761            0 : 
    4762            0 :         guard
    4763            0 :             .open_mut()?
    4764            0 :             .rewrite_layers(&replace_layers, &drop_layers, &self.metrics);
    4765            0 : 
    4766            0 :         let upload_layers: Vec<_> = replace_layers.into_iter().map(|r| r.1).collect();
    4767            0 : 
    4768            0 :         self.remote_client
    4769            0 :             .schedule_compaction_update(&drop_layers, &upload_layers)?;
    4770              : 
    4771            0 :         Ok(())
    4772            0 :     }
    4773              : 
    4774              :     /// Schedules the uploads of the given image layers
    4775          364 :     fn upload_new_image_layers(
    4776          364 :         self: &Arc<Self>,
    4777          364 :         new_images: impl IntoIterator<Item = ResidentLayer>,
    4778          364 :     ) -> Result<(), super::upload_queue::NotInitialized> {
    4779          390 :         for layer in new_images {
    4780           26 :             self.remote_client.schedule_layer_file_upload(layer)?;
    4781              :         }
    4782              :         // should any new image layer have been created, not uploading index_part will
    4783              :         // result in a mismatch between remote_physical_size and layermap calculated
    4784              :         // size, which will fail some tests, but should not be an issue otherwise.
    4785          364 :         self.remote_client
    4786          364 :             .schedule_index_upload_for_file_changes()?;
    4787          364 :         Ok(())
    4788          364 :     }
    4789              : 
    4790              :     /// Find the Lsns above which layer files need to be retained on
    4791              :     /// garbage collection.
    4792              :     ///
    4793              :     /// We calculate two cutoffs, one based on time and one based on WAL size.  `pitr`
    4794              :     /// controls the time cutoff (or ZERO to disable time-based retention), and `space_cutoff` controls
    4795              :     /// the space-based retention.
    4796              :     ///
    4797              :     /// This function doesn't simply calculate time- & space-based retention: it treats time-based
    4798              :     /// retention as authoritative if enabled, and falls back to space-based retention if calculating
    4799              :     /// the LSN for a time point isn't possible.  Therefore the GcCutoffs::space in the response might
    4800              :     /// be different from the `space_cutoff` input.  Callers should treat the min() of the two cutoffs
    4801              :     /// in the response as the GC cutoff point for the timeline.
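                      :     ///
                      :     /// For example (an illustrative sketch, not from a real run): with `pitr`
                      :     /// unset (`Duration::ZERO`) and the default-interval time cutoff resolving to
                      :     /// LSN 0/4000, a `space_cutoff` of 0/5000 yields
                      :     /// `GcCutoffs { time: last_record_lsn, space: max(0/4000, 0/5000) = 0/5000 }`,
                      :     /// and the caller then GCs at `min(time, space)`.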
    4802            4 :     #[instrument(skip_all, fields(timeline_id=%self.timeline_id))]
    4803              :     pub(super) async fn find_gc_cutoffs(
    4804              :         &self,
    4805              :         space_cutoff: Lsn,
    4806              :         pitr: Duration,
    4807              :         cancel: &CancellationToken,
    4808              :         ctx: &RequestContext,
    4809              :     ) -> Result<GcCutoffs, PageReconstructError> {
    4810              :         let _timer = self
    4811              :             .metrics
    4812              :             .find_gc_cutoffs_histo
    4813              :             .start_timer()
    4814              :             .record_on_drop();
    4815              : 
    4816              :         pausable_failpoint!("Timeline::find_gc_cutoffs-pausable");
    4817              : 
    4818              :         if cfg!(test) {
    4819              :             // Unit tests which specify zero PITR interval expect to avoid doing any I/O for timestamp lookup
    4820              :             if pitr == Duration::ZERO {
    4821              :                 return Ok(GcCutoffs {
    4822              :                     time: self.get_last_record_lsn(),
    4823              :                     space: space_cutoff,
    4824              :                 });
    4825              :             }
    4826              :         }
    4827              : 
    4828              :         // Calculate a time-based limit on how much to retain:
    4829              :         // - if PITR interval is set, then this is our cutoff.
    4830              :         // - if PITR interval is not set, then we do a lookup
    4831              :         //   based on DEFAULT_PITR_INTERVAL, so that size-based retention does not result in keeping history around permanently on idle databases.
    4832              :         let time_cutoff = {
    4833              :             let now = SystemTime::now();
    4834              :             let time_range = if pitr == Duration::ZERO {
    4835              :                 humantime::parse_duration(DEFAULT_PITR_INTERVAL).expect("constant is invalid")
    4836              :             } else {
    4837              :                 pitr
    4838              :             };
    4839              : 
    4840              :             // If PITR is so large or `now` is so small that this underflows, we will retain no history (highly unexpected case)
    4841              :             let time_cutoff = now.checked_sub(time_range).unwrap_or(now);
    4842              :             let timestamp = to_pg_timestamp(time_cutoff);
    4843              : 
    4844              :             match self.find_lsn_for_timestamp(timestamp, cancel, ctx).await? {
    4845              :                 LsnForTimestamp::Present(lsn) => Some(lsn),
    4846              :                 LsnForTimestamp::Future(lsn) => {
    4847              :                     // The timestamp is in the future. That sounds impossible,
    4848              :                     // but what it really means is that there hasn't been
    4849              :                     // any commits since the cutoff timestamp.
    4850              :                     //
    4851              :                     // In this case we should use the LSN of the most recent commit,
    4852              :                     // which is implicitly the last LSN in the log.
    4853              :                     debug!("future({})", lsn);
    4854              :                     Some(self.get_last_record_lsn())
    4855              :                 }
    4856              :                 LsnForTimestamp::Past(lsn) => {
    4857              :                     debug!("past({})", lsn);
    4858              :                     None
    4859              :                 }
    4860              :                 LsnForTimestamp::NoData(lsn) => {
    4861              :                     debug!("nodata({})", lsn);
    4862              :                     None
    4863              :                 }
    4864              :             }
    4865              :         };
    4866              : 
    4867              :         Ok(match (pitr, time_cutoff) {
    4868              :             (Duration::ZERO, Some(time_cutoff)) => {
    4869              :                 // PITR is not set. Retain the size-based limit, or the default time retention,
    4870              :                 // whichever requires less data.
    4871              :                 GcCutoffs {
    4872              :                     time: self.get_last_record_lsn(),
    4873              :                     space: std::cmp::max(time_cutoff, space_cutoff),
    4874              :                 }
    4875              :             }
    4876              :             (Duration::ZERO, None) => {
    4877              :                 // PITR is not set, and time lookup failed
    4878              :                 GcCutoffs {
    4879              :                     time: self.get_last_record_lsn(),
    4880              :                     space: space_cutoff,
    4881              :                 }
    4882              :             }
    4883              :             (_, None) => {
    4884              :                 // PITR interval is set & we didn't look up a timestamp successfully.  Conservatively assume PITR
    4885              :                 // cannot advance beyond what was already GC'd, and respect space-based retention
    4886              :                 GcCutoffs {
    4887              :                     time: *self.get_latest_gc_cutoff_lsn(),
    4888              :                     space: space_cutoff,
    4889              :                 }
    4890              :             }
    4891              :             (_, Some(time_cutoff)) => {
    4892              :                 // PITR interval is set and we looked up timestamp successfully.  Ignore
    4893              :                 // size based retention and make time cutoff authoritative
    4894              :                 GcCutoffs {
    4895              :                     time: time_cutoff,
    4896              :                     space: time_cutoff,
    4897              :                 }
    4898              :             }
    4899              :         })
    4900              :     }
    4901              : 
    4902              :     /// Garbage collect layer files on a timeline that are no longer needed.
    4903              :     ///
    4904              :     /// Currently, we don't make any attempt at removing unneeded page versions
    4905              :     /// within a layer file. We can only remove the whole file if it's fully
    4906              :     /// obsolete.
    4907            4 :     pub(super) async fn gc(&self) -> Result<GcResult, GcError> {
    4908              :         // this is most likely one of the background tasks, but it might be the spawned task from
    4909              :         // immediate_gc
    4910            4 :         let _g = tokio::select! {
    4911            4 :             guard = self.gc_lock.lock() => guard,
    4912            4 :             _ = self.cancel.cancelled() => return Ok(GcResult::default()),
    4913              :         };
    4914            4 :         let timer = self.metrics.garbage_collect_histo.start_timer();
    4915            4 : 
    4916            4 :         fail_point!("before-timeline-gc");
    4917            4 : 
    4918            4 :         // Is the timeline being deleted?
    4919            4 :         if self.is_stopping() {
    4920            0 :             return Err(GcError::TimelineCancelled);
    4921            4 :         }
    4922            4 : 
    4923            4 :         let (space_cutoff, time_cutoff, retain_lsns, max_lsn_with_valid_lease) = {
    4924            4 :             let gc_info = self.gc_info.read().unwrap();
    4925            4 : 
    4926            4 :             let space_cutoff = min(gc_info.cutoffs.space, self.get_disk_consistent_lsn());
    4927            4 :             let time_cutoff = gc_info.cutoffs.time;
    4928            4 :             let retain_lsns = gc_info
    4929            4 :                 .retain_lsns
    4930            4 :                 .iter()
    4931            4 :                 .map(|(lsn, _child_id, _is_offloaded)| *lsn)
    4932            4 :                 .collect();
    4933            4 : 
    4934            4 :             // Gets the maximum LSN that holds the valid lease.
    4935            4 :             //
    4936            4 :             // Caveat: `refresh_gc_info` is in charge of updating the lease map.
    4937            4 :             // Here, we do not check for stale leases again.
    4938            4 :             let max_lsn_with_valid_lease = gc_info.leases.last_key_value().map(|(lsn, _)| *lsn);
    4939            4 : 
    4940            4 :             (
    4941            4 :                 space_cutoff,
    4942            4 :                 time_cutoff,
    4943            4 :                 retain_lsns,
    4944            4 :                 max_lsn_with_valid_lease,
    4945            4 :             )
    4946            4 :         };
    4947            4 : 
    4948            4 :         let mut new_gc_cutoff = Lsn::min(space_cutoff, time_cutoff);
    4949            4 :         let standby_horizon = self.standby_horizon.load();
    4950            4 :         // Hold GC for the standby, but as a safety guard do it only within some
    4951            4 :         // reasonable lag.
    4952            4 :         if standby_horizon != Lsn::INVALID {
    4953            0 :             if let Some(standby_lag) = new_gc_cutoff.checked_sub(standby_horizon) {
    4954              :                 const MAX_ALLOWED_STANDBY_LAG: u64 = 10u64 << 30; // 10 GB
    4955            0 :                 if standby_lag.0 < MAX_ALLOWED_STANDBY_LAG {
    4956            0 :                     new_gc_cutoff = Lsn::min(standby_horizon, new_gc_cutoff);
    4957            0 :                     trace!("holding off GC for standby apply LSN {}", standby_horizon);
    4958              :                 } else {
    4959            0 :                     warn!(
    4960            0 :                         "standby is lagging by more than {}MB, not holding gc for it",
    4961            0 :                         MAX_ALLOWED_STANDBY_LAG / 1024 / 1024
    4962              :                     )
    4963              :                 }
    4964            0 :             }
    4965            4 :         }
    4966              : 
    4967              :         // Reset the standby horizon so that it is ignored if it is not updated before the next GC.
    4968              :         // This is an easy way to unset it when the standby disappears, without adding
    4969              :         // more conf options.
    4970            4 :         self.standby_horizon.store(Lsn::INVALID);
    4971            4 :         self.metrics
    4972            4 :             .standby_horizon_gauge
    4973            4 :             .set(Lsn::INVALID.0 as i64);
    4974              : 
    4975            4 :         let res = self
    4976            4 :             .gc_timeline(
    4977            4 :                 space_cutoff,
    4978            4 :                 time_cutoff,
    4979            4 :                 retain_lsns,
    4980            4 :                 max_lsn_with_valid_lease,
    4981            4 :                 new_gc_cutoff,
    4982            4 :             )
    4983            4 :             .instrument(
    4984            4 :                 info_span!("gc_timeline", timeline_id = %self.timeline_id, cutoff = %new_gc_cutoff),
    4985              :             )
    4986            0 :             .await?;
    4987              : 
    4988              :         // only record successes
    4989            4 :         timer.stop_and_record();
    4990            4 : 
    4991            4 :         Ok(res)
    4992            4 :     }
    4993              : 
    4994            4 :     async fn gc_timeline(
    4995            4 :         &self,
    4996            4 :         space_cutoff: Lsn,
    4997            4 :         time_cutoff: Lsn,
    4998            4 :         retain_lsns: Vec<Lsn>,
    4999            4 :         max_lsn_with_valid_lease: Option<Lsn>,
    5000            4 :         new_gc_cutoff: Lsn,
    5001            4 :     ) -> Result<GcResult, GcError> {
    5002            4 :         // FIXME: if there is an ongoing detach_from_ancestor, we should just skip gc
    5003            4 : 
    5004            4 :         let now = SystemTime::now();
    5005            4 :         let mut result: GcResult = GcResult::default();
    5006            4 : 
    5007            4 :         // Nothing to GC. Return early.
    5008            4 :         let latest_gc_cutoff = *self.get_latest_gc_cutoff_lsn();
    5009            4 :         if latest_gc_cutoff >= new_gc_cutoff {
    5010            0 :             info!(
    5011            0 :                 "Nothing to GC: new_gc_cutoff_lsn {new_gc_cutoff}, latest_gc_cutoff_lsn {latest_gc_cutoff}",
    5012              :             );
    5013            0 :             return Ok(result);
    5014            4 :         }
    5015              : 
    5016              :         // We need to ensure that no one tries to read page versions or create
    5017              :         // branches at a point before latest_gc_cutoff_lsn. See branch_timeline()
    5018              :         // for details. This will block until the old value is no longer in use.
    5019              :         //
    5020              :         // The GC cutoff should only ever move forwards.
    5021            4 :         let waitlist = {
    5022            4 :             let write_guard = self.latest_gc_cutoff_lsn.lock_for_write();
    5023            4 :             if *write_guard > new_gc_cutoff {
    5024            0 :                 return Err(GcError::BadLsn {
    5025            0 :                     why: format!(
    5026            0 :                         "Cannot move GC cutoff LSN backwards (was {}, new {})",
    5027            0 :                         *write_guard, new_gc_cutoff
    5028            0 :                     ),
    5029            0 :                 });
    5030            4 :             }
    5031            4 : 
    5032            4 :             write_guard.store_and_unlock(new_gc_cutoff)
    5033            4 :         };
    5034            4 :         waitlist.wait().await;
    5035              : 
    5036            4 :         info!("GC starting");
    5037              : 
    5038            4 :         debug!("retain_lsns: {:?}", retain_lsns);
    5039              : 
    5040            4 :         let mut layers_to_remove = Vec::new();
    5041              : 
    5042              :         // Scan all layers in the timeline (remote or on-disk).
    5043              :         //
    5044              :         // Garbage collect the layer if all conditions are satisfied:
    5045              :         // 1. it is older than cutoff LSN;
    5046              :         // 2. it is older than PITR interval;
    5047              :         // 3. it doesn't need to be retained for 'retain_lsns';
    5048              :         // 4. it does not need to be kept for LSNs holding valid leases.
    5049              :         // 5. newer on-disk image layers cover the layer's whole key range
    5050              :         //
    5051              :         // TODO holding a write lock is too aggressive and avoidable
    5052            4 :         let mut guard = self.layers.write().await;
    5053            4 :         let layers = guard.layer_map()?;
    5054           24 :         'outer: for l in layers.iter_historic_layers() {
    5055           24 :             result.layers_total += 1;
    5056           24 : 
    5057           24 :             // 1. Is it newer than GC horizon cutoff point?
    5058           24 :             if l.get_lsn_range().end > space_cutoff {
    5059            2 :                 info!(
    5060            0 :                     "keeping {} because it's newer than space_cutoff {}",
    5061            0 :                     l.layer_name(),
    5062              :                     space_cutoff,
    5063              :                 );
    5064            2 :                 result.layers_needed_by_cutoff += 1;
    5065            2 :                 continue 'outer;
    5066           22 :             }
    5067           22 : 
    5068           22 :             // 2. Is it newer than the PiTR cutoff point?
    5069           22 :             if l.get_lsn_range().end > time_cutoff {
    5070            0 :                 info!(
    5071            0 :                     "keeping {} because it's newer than time_cutoff {}",
    5072            0 :                     l.layer_name(),
    5073              :                     time_cutoff,
    5074              :                 );
    5075            0 :                 result.layers_needed_by_pitr += 1;
    5076            0 :                 continue 'outer;
    5077           22 :             }
    5078              : 
    5079              :             // 3. Is it needed by a child branch?
    5080              :             // NOTE: With this check we may keep data that might be
    5081              :             // referenced by child branches forever.
    5082              :             // We can track this in child timeline GC and delete parent layers when
    5083              :             // they are no longer needed. This might be complicated with long inheritance chains.
    5084              :             //
    5085              :             // TODO Vec is not a great choice for `retain_lsns`
    5086           22 :             for retain_lsn in &retain_lsns {
    5087              :                 // start_lsn is inclusive
    5088            0 :                 if &l.get_lsn_range().start <= retain_lsn {
    5089            0 :                     info!(
    5090            0 :                         "keeping {} because it still might be referenced by a child branch forked at {} is_dropped: xx is_incremental: {}",
    5091            0 :                         l.layer_name(),
    5092            0 :                         retain_lsn,
    5093            0 :                         l.is_incremental(),
    5094              :                     );
    5095            0 :                     result.layers_needed_by_branches += 1;
    5096            0 :                     continue 'outer;
    5097            0 :                 }
    5098              :             }
    5099              : 
    5100              :             // 4. Is there a valid lease that requires us to keep this layer?
    5101           22 :             if let Some(lsn) = &max_lsn_with_valid_lease {
    5102              :                 // Keep the layer if its start LSN is <= any leased LSN (i.e. <= the max valid lease).
    5103           18 :                 if &l.get_lsn_range().start <= lsn {
    5104           14 :                     info!(
    5105            0 :                         "keeping {} because there is a valid lease preventing GC at {}",
    5106            0 :                         l.layer_name(),
    5107              :                         lsn,
    5108              :                     );
    5109           14 :                     result.layers_needed_by_leases += 1;
    5110           14 :                     continue 'outer;
    5111            4 :                 }
    5112            4 :             }
    5113              : 
    5114              :             // 5. Is there a later on-disk layer for this relation?
    5115              :             //
    5116              :             // The end-LSN is exclusive, while disk_consistent_lsn is
    5117              :             // inclusive. For example, if disk_consistent_lsn is 100, it is
    5118              :             // OK for a delta layer to have end LSN 101, but if the end LSN
    5119              :             // is 102, then it might not have been fully flushed to disk
    5120              :             // before crash.
    5121              :             //
    5122              :             // For example, imagine that the following layers exist:
    5123              :             //
    5124              :             // 1000      - image (A)
    5125              :             // 1000-2000 - delta (B)
    5126              :             // 2000      - image (C)
    5127              :             // 2000-3000 - delta (D)
    5128              :             // 3000      - image (E)
    5129              :             //
    5130              :             // If GC horizon is at 2500, we can remove layers A and B, but
    5131              :             // we cannot remove C, even though it's older than 2500, because
    5132              :             // the delta layer 2000-3000 depends on it.
    5133            8 :             if !layers
    5134            8 :                 .image_layer_exists(&l.get_key_range(), &(l.get_lsn_range().end..new_gc_cutoff))
    5135              :             {
    5136            6 :                 info!("keeping {} because it is the latest layer", l.layer_name());
    5137            6 :                 result.layers_not_updated += 1;
    5138            6 :                 continue 'outer;
    5139            2 :             }
    5140            2 : 
    5141            2 :             // We didn't find any reason to keep this file, so remove it.
    5142            2 :             info!(
    5143            0 :                 "garbage collecting {} is_dropped: xx is_incremental: {}",
    5144            0 :                 l.layer_name(),
    5145            0 :                 l.is_incremental(),
    5146              :             );
    5147            2 :             layers_to_remove.push(l);
    5148              :         }
    5149              : 
    5150            4 :         if !layers_to_remove.is_empty() {
    5151              :             // Persist the new GC cutoff value before we actually remove anything.
    5152              :             // This unconditionally schedules also an index_part.json update, even though, we will
    5153              :             // be doing one a bit later with the unlinked gc'd layers.
    5154            2 :             let disk_consistent_lsn = self.disk_consistent_lsn.load();
    5155            2 :             self.schedule_uploads(disk_consistent_lsn, None)
    5156            2 :                 .map_err(|e| {
    5157            0 :                     if self.cancel.is_cancelled() {
    5158            0 :                         GcError::TimelineCancelled
    5159              :                     } else {
    5160            0 :                         GcError::Remote(e)
    5161              :                     }
    5162            2 :                 })?;
    5163              : 
    5164            2 :             let gc_layers = layers_to_remove
    5165            2 :                 .iter()
    5166            2 :                 .map(|x| guard.get_from_desc(x))
    5167            2 :                 .collect::<Vec<Layer>>();
    5168            2 : 
    5169            2 :             result.layers_removed = gc_layers.len() as u64;
    5170            2 : 
    5171            2 :             self.remote_client.schedule_gc_update(&gc_layers)?;
    5172              : 
    5173            2 :             guard.open_mut()?.finish_gc_timeline(&gc_layers);
    5174            2 : 
    5175            2 :             #[cfg(feature = "testing")]
    5176            2 :             {
    5177            2 :                 result.doomed_layers = gc_layers;
    5178            2 :             }
    5179            2 :         }
    5180              : 
    5181            4 :         info!(
    5182            0 :             "GC completed removing {} layers, cutoff {}",
    5183              :             result.layers_removed, new_gc_cutoff
    5184              :         );
    5185              : 
    5186            4 :         result.elapsed = now.elapsed().unwrap_or(Duration::ZERO);
    5187            4 :         Ok(result)
    5188            4 :     }
    5189              : 
    5190              :     /// Reconstruct a value, using the given base image and WAL records in 'data'.
    5191       667406 :     async fn reconstruct_value(
    5192       667406 :         &self,
    5193       667406 :         key: Key,
    5194       667406 :         request_lsn: Lsn,
    5195       667406 :         mut data: ValueReconstructState,
    5196       667406 :     ) -> Result<Bytes, PageReconstructError> {
    5197       667406 :         // Perform WAL redo if needed
    5198       667406 :         data.records.reverse();
    5199       667406 : 
    5200       667406 :         // If we have a page image, and no WAL, we're all set
    5201       667406 :         if data.records.is_empty() {
    5202       666996 :             if let Some((img_lsn, img)) = &data.img {
    5203       666996 :                 trace!(
    5204            0 :                     "found page image for key {} at {}, no WAL redo required, req LSN {}",
    5205              :                     key,
    5206              :                     img_lsn,
    5207              :                     request_lsn,
    5208              :                 );
    5209       666996 :                 Ok(img.clone())
    5210              :             } else {
    5211            0 :                 Err(PageReconstructError::from(anyhow!(
    5212            0 :                     "base image for {key} at {request_lsn} not found"
    5213            0 :                 )))
    5214              :             }
    5215              :         } else {
    5216              :             // We need to do WAL redo.
    5217              :             //
    5218              :             // If we don't have a base image, then the oldest WAL record better initialize
    5219              :             // the page
    5220          410 :             if data.img.is_none() && !data.records.first().unwrap().1.will_init() {
    5221            0 :                 Err(PageReconstructError::from(anyhow!(
    5222            0 :                     "Base image for {} at {} not found, but got {} WAL records",
    5223            0 :                     key,
    5224            0 :                     request_lsn,
    5225            0 :                     data.records.len()
    5226            0 :                 )))
    5227              :             } else {
    5228          410 :                 if data.img.is_some() {
    5229          344 :                     trace!(
    5230            0 :                         "found {} WAL records and a base image for {} at {}, performing WAL redo",
    5231            0 :                         data.records.len(),
    5232              :                         key,
    5233              :                         request_lsn
    5234              :                     );
    5235              :                 } else {
    5236           66 :                     trace!("found {} WAL records that will init the page for {} at {}, performing WAL redo", data.records.len(), key, request_lsn);
    5237              :                 };
    5238          410 :                 let res = self
    5239          410 :                     .walredo_mgr
    5240          410 :                     .as_ref()
    5241          410 :                     .context("timeline has no walredo manager")
    5242          410 :                     .map_err(PageReconstructError::WalRedo)?
    5243          410 :                     .request_redo(key, request_lsn, data.img, data.records, self.pg_version)
    5244            0 :                     .await;
    5245          410 :                 let img = match res {
    5246          410 :                     Ok(img) => img,
    5247            0 :                     Err(walredo::Error::Cancelled) => return Err(PageReconstructError::Cancelled),
    5248            0 :                     Err(walredo::Error::Other(e)) => {
    5249            0 :                         return Err(PageReconstructError::WalRedo(
    5250            0 :                             e.context("reconstruct a page image"),
    5251            0 :                         ))
    5252              :                     }
    5253              :                 };
    5254          410 :                 Ok(img)
    5255              :             }
    5256              :         }
    5257       667406 :     }
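                      : 
                      :     // A minimal sketch (hypothetical caller; the exact `ValueReconstructState`
                      :     // construction is an assumption) of the rule implemented above: an image with
                      :     // no WAL records is returned as-is, otherwise the records are replayed on top
                      :     // of the (optional) image by the walredo manager.
                      :     //
                      :     //     let state = ValueReconstructState {
                      :     //         img: Some((img_lsn, page_image)),
                      :     //         records: vec![],
                      :     //     };
                      :     //     let page = timeline.reconstruct_value(key, request_lsn, state).await?;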
    5258              : 
    5259            0 :     pub(crate) async fn spawn_download_all_remote_layers(
    5260            0 :         self: Arc<Self>,
    5261            0 :         request: DownloadRemoteLayersTaskSpawnRequest,
    5262            0 :     ) -> Result<DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskInfo> {
    5263              :         use pageserver_api::models::DownloadRemoteLayersTaskState;
    5264              : 
    5265              :         // this is not really needed anymore; it has tests which really check the return value from
    5266              :         // the HTTP API. It would be better not to maintain this anymore.
    5267              : 
    5268            0 :         let mut status_guard = self.download_all_remote_layers_task_info.write().unwrap();
    5269            0 :         if let Some(st) = &*status_guard {
    5270            0 :             match &st.state {
    5271              :                 DownloadRemoteLayersTaskState::Running => {
    5272            0 :                     return Err(st.clone());
    5273              :                 }
    5274              :                 DownloadRemoteLayersTaskState::ShutDown
    5275            0 :                 | DownloadRemoteLayersTaskState::Completed => {
    5276            0 :                     *status_guard = None;
    5277            0 :                 }
    5278              :             }
    5279            0 :         }
    5280              : 
    5281            0 :         let self_clone = Arc::clone(&self);
    5282            0 :         let task_id = task_mgr::spawn(
    5283            0 :             task_mgr::BACKGROUND_RUNTIME.handle(),
    5284            0 :             task_mgr::TaskKind::DownloadAllRemoteLayers,
    5285            0 :             self.tenant_shard_id,
    5286            0 :             Some(self.timeline_id),
    5287            0 :             "download all remote layers task",
    5288            0 :             async move {
    5289            0 :                 self_clone.download_all_remote_layers(request).await;
    5290            0 :                 let mut status_guard = self_clone.download_all_remote_layers_task_info.write().unwrap();
    5291            0 :                  match &mut *status_guard {
    5292              :                     None => {
    5293            0 :                         warn!("task status is supposed to be Some(), since we are running");
    5294              :                     }
    5295            0 :                     Some(st) => {
    5296            0 :                         let exp_task_id = format!("{}", task_mgr::current_task_id().unwrap());
    5297            0 :                         if st.task_id != exp_task_id {
    5298            0 :                             warn!("task id changed while we were still running, expected {} but found {}", exp_task_id, st.task_id);
    5299            0 :                         } else {
    5300            0 :                             st.state = DownloadRemoteLayersTaskState::Completed;
    5301            0 :                         }
    5302              :                     }
    5303              :                 };
    5304            0 :                 Ok(())
    5305            0 :             }
    5306            0 :             .instrument(info_span!(parent: None, "download_all_remote_layers", tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))
    5307              :         );
    5308              : 
    5309            0 :         let initial_info = DownloadRemoteLayersTaskInfo {
    5310            0 :             task_id: format!("{task_id}"),
    5311            0 :             state: DownloadRemoteLayersTaskState::Running,
    5312            0 :             total_layer_count: 0,
    5313            0 :             successful_download_count: 0,
    5314            0 :             failed_download_count: 0,
    5315            0 :         };
    5316            0 :         *status_guard = Some(initial_info.clone());
    5317            0 : 
    5318            0 :         Ok(initial_info)
    5319            0 :     }
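                      : 
                      :     // A hedged usage sketch: both variants of the returned Result carry a
                      :     // DownloadRemoteLayersTaskInfo, with Err meaning a task was already running.
                      :     // Assuming a `timeline: Arc<Timeline>` and a `request` in scope, a caller
                      :     // (e.g. an HTTP handler) might do:
                      :     //
                      :     //     match timeline.spawn_download_all_remote_layers(request).await {
                      :     //         Ok(info) => { /* new task spawned; info.state is Running */ }
                      :     //         Err(existing) => { /* report the already-running task's status */ }
                      :     //     }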
    5320              : 
    5321            0 :     async fn download_all_remote_layers(
    5322            0 :         self: &Arc<Self>,
    5323            0 :         request: DownloadRemoteLayersTaskSpawnRequest,
    5324            0 :     ) {
    5325              :         use pageserver_api::models::DownloadRemoteLayersTaskState;
    5326              : 
    5327            0 :         let remaining = {
    5328            0 :             let guard = self.layers.read().await;
    5329            0 :             let Ok(lm) = guard.layer_map() else {
    5330              :                 // Technically we could look into iterating accessible layers here, but downloading
    5331              :                 // all layers of a shut-down timeline makes no sense regardless.
    5332            0 :                 tracing::info!("attempted to download all layers of shutdown timeline");
    5333            0 :                 return;
    5334              :             };
    5335            0 :             lm.iter_historic_layers()
    5336            0 :                 .map(|desc| guard.get_from_desc(&desc))
    5337            0 :                 .collect::<Vec<_>>()
    5338            0 :         };
    5339            0 :         let total_layer_count = remaining.len();
    5340              : 
    5341              :         macro_rules! lock_status {
    5342              :             ($st:ident) => {
    5343              :                 let mut st = self.download_all_remote_layers_task_info.write().unwrap();
    5344              :                 let st = st
    5345              :                     .as_mut()
    5346              :                     .expect("this function is only called after the task has been spawned");
    5347              :                 assert_eq!(
    5348              :                     st.task_id,
    5349              :                     format!(
    5350              :                         "{}",
    5351              :                         task_mgr::current_task_id().expect("we run inside a task_mgr task")
    5352              :                     )
    5353              :                 );
    5354              :                 let $st = st;
    5355              :             };
    5356              :         }
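                      : 
                      :         // Note on `lock_status!`: it takes the status write lock, asserts that
                      :         // the entry belongs to the current task_mgr task, and rebinds the mutable
                      :         // reference as `$st`; the lock guard is held until the end of the
                      :         // enclosing block.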
    5357              : 
    5358              :         {
    5359            0 :             lock_status!(st);
    5360            0 :             st.total_layer_count = total_layer_count as u64;
    5361            0 :         }
    5362            0 : 
    5363            0 :         let mut remaining = remaining.into_iter();
    5364            0 :         let mut have_remaining = true;
    5365            0 :         let mut js = tokio::task::JoinSet::new();
    5366            0 : 
    5367            0 :         let cancel = task_mgr::shutdown_token();
    5368            0 : 
    5369            0 :         let limit = request.max_concurrent_downloads;
    5370              : 
    5371              :         loop {
    5372            0 :             while js.len() < limit.get() && have_remaining && !cancel.is_cancelled() {
    5373            0 :                 let Some(next) = remaining.next() else {
    5374            0 :                     have_remaining = false;
    5375            0 :                     break;
    5376              :                 };
    5377              : 
    5378            0 :                 let span = tracing::info_span!("download", layer = %next);
    5379              : 
    5380            0 :                 js.spawn(
    5381            0 :                     async move {
    5382            0 :                         let res = next.download().await;
    5383            0 :                         (next, res)
    5384            0 :                     }
    5385            0 :                     .instrument(span),
    5386            0 :                 );
    5387            0 :             }
    5388              : 
    5389            0 :             while let Some(res) = js.join_next().await {
    5390            0 :                 match res {
    5391              :                     Ok((_, Ok(_))) => {
    5392            0 :                         lock_status!(st);
    5393            0 :                         st.successful_download_count += 1;
    5394              :                     }
    5395            0 :                     Ok((layer, Err(e))) => {
    5396            0 :                         tracing::error!(%layer, "download failed: {e:#}");
    5397            0 :                         lock_status!(st);
    5398            0 :                         st.failed_download_count += 1;
    5399              :                     }
    5400            0 :                     Err(je) if je.is_cancelled() => unreachable!("not used here"),
    5401            0 :                     Err(je) if je.is_panic() => {
    5402            0 :                         lock_status!(st);
    5403            0 :                         st.failed_download_count += 1;
    5404              :                     }
    5405            0 :                     Err(je) => tracing::warn!("unknown join error: {je:?}"),
    5406              :                 }
    5407              :             }
    5408              : 
    5409            0 :             if js.is_empty() && (!have_remaining || cancel.is_cancelled()) {
    5410            0 :                 break;
    5411            0 :             }
    5412              :         }
    5413              : 
    5414              :         {
    5415            0 :             lock_status!(st);
    5416            0 :             st.state = DownloadRemoteLayersTaskState::Completed;
    5417              :         }
    5418            0 :     }
    5419              : 
    5420            0 :     pub(crate) fn get_download_all_remote_layers_task_info(
    5421            0 :         &self,
    5422            0 :     ) -> Option<DownloadRemoteLayersTaskInfo> {
    5423            0 :         self.download_all_remote_layers_task_info
    5424            0 :             .read()
    5425            0 :             .unwrap()
    5426            0 :             .clone()
    5427            0 :     }
    5428              : }
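                      : 
                      : // An illustrative, self-contained sketch of the bounded-concurrency JoinSet
                      : // pattern used by `download_all_remote_layers` above: keep at most `limit`
                      : // tasks in flight and refill slots as work completes. Note one difference to
                      : // the loop above, which drains the whole set before spawning more work; this
                      : // variant refills after every completion. `fetch` is a hypothetical stand-in
                      : // for `Layer::download`; this function is not used by the pageserver itself.
                      : #[allow(dead_code)]
                      : async fn download_bounded(layers: Vec<u32>, limit: usize) -> (u64, u64) {
                      :     async fn fetch(_layer: u32) -> anyhow::Result<()> {
                      :         // Placeholder for the real remote-storage download.
                      :         Ok(())
                      :     }
                      : 
                      :     let mut remaining = layers.into_iter();
                      :     let mut js = tokio::task::JoinSet::new();
                      :     let (mut ok, mut failed) = (0u64, 0u64);
                      :     loop {
                      :         // Refill: keep at most `limit` downloads in flight.
                      :         while js.len() < limit {
                      :             let Some(next) = remaining.next() else { break };
                      :             js.spawn(fetch(next));
                      :         }
                      :         // Wait for one completion; None means the set is empty and no work remains.
                      :         match js.join_next().await {
                      :             Some(Ok(Ok(()))) => ok += 1,
                      :             Some(_) => failed += 1, // download error or task panic
                      :             None => break,
                      :         }
                      :     }
                      :     (ok, failed)
                      : }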
    5429              : 
    5430              : impl Timeline {
    5431              :     /// Returns non-remote layers for eviction.
    5432            0 :     pub(crate) async fn get_local_layers_for_disk_usage_eviction(&self) -> DiskUsageEvictionInfo {
    5433            0 :         let guard = self.layers.read().await;
    5434            0 :         let mut max_layer_size: Option<u64> = None;
    5435            0 : 
    5436            0 :         let resident_layers = guard
    5437            0 :             .likely_resident_layers()
    5438            0 :             .map(|layer| {
    5439            0 :                 let file_size = layer.layer_desc().file_size;
    5440            0 :                 max_layer_size = max_layer_size.map_or(Some(file_size), |m| Some(m.max(file_size)));
    5441            0 : 
    5442            0 :                 let last_activity_ts = layer.latest_activity();
    5443            0 : 
    5444            0 :                 EvictionCandidate {
    5445            0 :                     layer: layer.to_owned().into(),
    5446            0 :                     last_activity_ts,
    5447            0 :                     relative_last_activity: finite_f32::FiniteF32::ZERO,
    5448            0 :                     visibility: layer.visibility(),
    5449            0 :                 }
    5450            0 :             })
    5451            0 :             .collect();
    5452            0 : 
    5453            0 :         DiskUsageEvictionInfo {
    5454            0 :             max_layer_size,
    5455            0 :             resident_layers,
    5456            0 :         }
    5457            0 :     }
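                      : 
                      :     // The map above also folds a running maximum: `max_layer_size` ends up as
                      :     // Some(largest file_size) over all likely-resident layers, or None when
                      :     // there are none. Note that `relative_last_activity` is only zero-initialized
                      :     // here; presumably the disk-usage eviction code computes the real relative
                      :     // value when ordering candidates.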
    5458              : 
    5459         1730 :     pub(crate) fn get_shard_index(&self) -> ShardIndex {
    5460         1730 :         ShardIndex {
    5461         1730 :             shard_number: self.tenant_shard_id.shard_number,
    5462         1730 :             shard_count: self.tenant_shard_id.shard_count,
    5463         1730 :         }
    5464         1730 :     }
    5465              : 
    5466              :     /// Persistently blocks GC for the `Manual` reason.
    5467              :     ///
    5468              :     /// Returns true if no such block existed before, false otherwise.
    5469            0 :     pub(crate) async fn block_gc(&self, tenant: &super::Tenant) -> anyhow::Result<bool> {
    5470              :         use crate::tenant::remote_timeline_client::index::GcBlockingReason;
    5471            0 :         assert_eq!(self.tenant_shard_id, tenant.tenant_shard_id);
    5472            0 :         tenant.gc_block.insert(self, GcBlockingReason::Manual).await
    5473            0 :     }
    5474              : 
    5475              :     /// Persistently unblocks GC for the `Manual` reason.
    5476            0 :     pub(crate) async fn unblock_gc(&self, tenant: &super::Tenant) -> anyhow::Result<()> {
    5477              :         use crate::tenant::remote_timeline_client::index::GcBlockingReason;
    5478            0 :         assert_eq!(self.tenant_shard_id, tenant.tenant_shard_id);
    5479            0 :         tenant.gc_block.remove(self, GcBlockingReason::Manual).await
    5480            0 :     }
    5481              : 
    5482              :     #[cfg(test)]
    5483           40 :     pub(super) fn force_advance_lsn(self: &Arc<Timeline>, new_lsn: Lsn) {
    5484           40 :         self.last_record_lsn.advance(new_lsn);
    5485           40 :     }
    5486              : 
    5487              :     #[cfg(test)]
    5488            2 :     pub(super) fn force_set_disk_consistent_lsn(&self, new_value: Lsn) {
    5489            2 :         self.disk_consistent_lsn.store(new_value);
    5490            2 :     }
    5491              : 
    5492              :     /// Force create an image layer and place it into the layer map.
    5493              :     ///
    5494              :     /// DO NOT use this function directly. Use [`Tenant::branch_timeline_test_with_layers`]
    5495              :     /// or [`Tenant::create_test_timeline_with_layers`] to ensure all these layers are
    5496              :     /// placed into the layer map in one run and validated.
    5497              :     #[cfg(test)]
    5498           52 :     pub(super) async fn force_create_image_layer(
    5499           52 :         self: &Arc<Timeline>,
    5500           52 :         lsn: Lsn,
    5501           52 :         mut images: Vec<(Key, Bytes)>,
    5502           52 :         check_start_lsn: Option<Lsn>,
    5503           52 :         ctx: &RequestContext,
    5504           52 :     ) -> anyhow::Result<()> {
    5505           52 :         let last_record_lsn = self.get_last_record_lsn();
    5506           52 :         assert!(
    5507           52 :             lsn <= last_record_lsn,
    5508            0 :             "advance last record lsn before inserting a layer, lsn={lsn}, last_record_lsn={last_record_lsn}"
    5509              :         );
    5510           52 :         if let Some(check_start_lsn) = check_start_lsn {
    5511           52 :             assert!(lsn >= check_start_lsn);
    5512            0 :         }
    5513          126 :         images.sort_unstable_by(|(ka, _), (kb, _)| ka.cmp(kb));
    5514           52 :         let min_key = *images.first().map(|(k, _)| k).unwrap();
    5515           52 :         let end_key = images.last().map(|(k, _)| k).unwrap().next();
    5516           52 :         let mut image_layer_writer = ImageLayerWriter::new(
    5517           52 :             self.conf,
    5518           52 :             self.timeline_id,
    5519           52 :             self.tenant_shard_id,
    5520           52 :             &(min_key..end_key),
    5521           52 :             lsn,
    5522           52 :             ctx,
    5523           52 :         )
    5524           26 :         .await?;
    5525          230 :         for (key, img) in images {
    5526          178 :             image_layer_writer.put_image(key, img, ctx).await?;
    5527              :         }
    5528          104 :         let (desc, path) = image_layer_writer.finish(ctx).await?;
    5529           52 :         let image_layer = Layer::finish_creating(self.conf, self, desc, &path)?;
    5530           52 :         info!("force created image layer {}", image_layer.local_path());
    5531              :         {
    5532           52 :             let mut guard = self.layers.write().await;
    5533           52 :             guard.open_mut().unwrap().force_insert_layer(image_layer);
    5534           52 :         }
    5535           52 : 
    5536           52 :         Ok(())
    5537           52 :     }
    5538              : 
    5539              :     /// Force create a delta layer and place it into the layer map.
    5540              :     ///
    5541              :     /// DO NOT use this function directly. Use [`Tenant::branch_timeline_test_with_layers`]
    5542              :     /// or [`Tenant::create_test_timeline_with_layers`] to ensure all these layers are
    5543              :     /// placed into the layer map in one run and validated.
    5544              :     #[cfg(test)]
    5545           74 :     pub(super) async fn force_create_delta_layer(
    5546           74 :         self: &Arc<Timeline>,
    5547           74 :         mut deltas: DeltaLayerTestDesc,
    5548           74 :         check_start_lsn: Option<Lsn>,
    5549           74 :         ctx: &RequestContext,
    5550           74 :     ) -> anyhow::Result<()> {
    5551           74 :         let last_record_lsn = self.get_last_record_lsn();
    5552           74 :         deltas
    5553           74 :             .data
    5554          124 :             .sort_unstable_by(|(ka, la, _), (kb, lb, _)| (ka, la).cmp(&(kb, lb)));
    5555           74 :         assert!(deltas.data.first().unwrap().0 >= deltas.key_range.start);
    5556           74 :         assert!(deltas.data.last().unwrap().0 < deltas.key_range.end);
    5557          272 :         for (_, lsn, _) in &deltas.data {
    5558          198 :             assert!(deltas.lsn_range.start <= *lsn && *lsn < deltas.lsn_range.end);
    5559              :         }
    5560           74 :         assert!(
    5561           74 :             deltas.lsn_range.end <= last_record_lsn,
    5562            0 :             "advance last record lsn before inserting a layer, end_lsn={}, last_record_lsn={}",
    5563              :             deltas.lsn_range.end,
    5564              :             last_record_lsn
    5565              :         );
    5566           74 :         if let Some(check_start_lsn) = check_start_lsn {
    5567           74 :             assert!(deltas.lsn_range.start >= check_start_lsn);
    5568            0 :         }
    5569           74 :         let mut delta_layer_writer = DeltaLayerWriter::new(
    5570           74 :             self.conf,
    5571           74 :             self.timeline_id,
    5572           74 :             self.tenant_shard_id,
    5573           74 :             deltas.key_range.start,
    5574           74 :             deltas.lsn_range,
    5575           74 :             ctx,
    5576           74 :         )
    5577           37 :         .await?;
    5578          272 :         for (key, lsn, val) in deltas.data {
    5579          198 :             delta_layer_writer.put_value(key, lsn, val, ctx).await?;
    5580              :         }
    5581          185 :         let (desc, path) = delta_layer_writer.finish(deltas.key_range.end, ctx).await?;
    5582           74 :         let delta_layer = Layer::finish_creating(self.conf, self, desc, &path)?;
    5583           74 :         info!("force created delta layer {}", delta_layer.local_path());
    5584              :         {
    5585           74 :             let mut guard = self.layers.write().await;
    5586           74 :             guard.open_mut().unwrap().force_insert_layer(delta_layer);
    5587           74 :         }
    5588           74 : 
    5589           74 :         Ok(())
    5590           74 :     }
    5591              : 
    5592              :     /// Returns all keys at the given LSN in the image layers.
    5593              :     #[cfg(test)]
    5594            6 :     pub(crate) async fn inspect_image_layers(
    5595            6 :         self: &Arc<Timeline>,
    5596            6 :         lsn: Lsn,
    5597            6 :         ctx: &RequestContext,
    5598            6 :     ) -> anyhow::Result<Vec<(Key, Bytes)>> {
    5599            6 :         let mut all_data = Vec::new();
    5600            6 :         let guard = self.layers.read().await;
    5601           34 :         for layer in guard.layer_map()?.iter_historic_layers() {
    5602           34 :             if !layer.is_delta() && layer.image_layer_lsn() == lsn {
    5603            8 :                 let layer = guard.get_from_desc(&layer);
    5604            8 :                 let mut reconstruct_data = ValuesReconstructState::default();
    5605            8 :                 layer
    5606            8 :                     .get_values_reconstruct_data(
    5607            8 :                         KeySpace::single(Key::MIN..Key::MAX),
    5608            8 :                         lsn..Lsn(lsn.0 + 1),
    5609            8 :                         &mut reconstruct_data,
    5610            8 :                         ctx,
    5611            8 :                     )
    5612           13 :                     .await?;
    5613           74 :                 for (k, v) in reconstruct_data.keys {
    5614           66 :                     all_data.push((k, v?.img.unwrap().1));
    5615              :                 }
    5616           26 :             }
    5617              :         }
    5618            6 :         all_data.sort();
    5619            6 :         Ok(all_data)
    5620            6 :     }
    5621              : 
    5622              :     /// Returns all historic layer descriptors in the layer map.
    5623              :     #[cfg(test)]
    5624           12 :     pub(crate) async fn inspect_historic_layers(
    5625           12 :         self: &Arc<Timeline>,
    5626           12 :     ) -> anyhow::Result<Vec<super::storage_layer::PersistentLayerKey>> {
    5627           12 :         let mut layers = Vec::new();
    5628           12 :         let guard = self.layers.read().await;
    5629           70 :         for layer in guard.layer_map()?.iter_historic_layers() {
    5630           70 :             layers.push(layer.key());
    5631           70 :         }
    5632           12 :         Ok(layers)
    5633           12 :     }
    5634              : 
    5635              :     #[cfg(test)]
    5636           10 :     pub(crate) fn add_extra_test_dense_keyspace(&self, ks: KeySpace) {
    5637           10 :         let mut keyspace = self.extra_test_dense_keyspace.load().as_ref().clone();
    5638           10 :         keyspace.merge(&ks);
    5639           10 :         self.extra_test_dense_keyspace.store(Arc::new(keyspace));
    5640           10 :     }
    5641              : }
    5642              : 
    5643              : /// Tracks the writes that ingestion makes to a particular in-memory layer.
    5644              : ///
    5645              : /// Cleared upon freezing a layer.
    5646              : pub(crate) struct TimelineWriterState {
    5647              :     open_layer: Arc<InMemoryLayer>,
    5648              :     current_size: u64,
    5649              :     // Previous LSN which passed through
    5650              :     prev_lsn: Option<Lsn>,
    5651              :     // Largest LSN which passed through the current writer
    5652              :     max_lsn: Option<Lsn>,
    5653              :     // Cached details of the last freeze. Avoids going through the atomic/lock on every put.
    5654              :     cached_last_freeze_at: Lsn,
    5655              : }
    5656              : 
    5657              : impl TimelineWriterState {
    5658         1268 :     fn new(open_layer: Arc<InMemoryLayer>, current_size: u64, last_freeze_at: Lsn) -> Self {
    5659         1268 :         Self {
    5660         1268 :             open_layer,
    5661         1268 :             current_size,
    5662         1268 :             prev_lsn: None,
    5663         1268 :             max_lsn: None,
    5664         1268 :             cached_last_freeze_at: last_freeze_at,
    5665         1268 :         }
    5666         1268 :     }
    5667              : }
    5668              : 
    5669              : /// Various functions to mutate the timeline.
    5670              : // TODO Currently, Deref is used to allow easy access to the read methods of the underlying Timeline.
    5671              : // This is generally considered bad practice in Rust and should be fixed eventually,
    5672              : // but doing so would require large code changes.
    5673              : pub(crate) struct TimelineWriter<'a> {
    5674              :     tl: &'a Timeline,
    5675              :     write_guard: tokio::sync::MutexGuard<'a, Option<TimelineWriterState>>,
    5676              : }
    5677              : 
    5678              : impl Deref for TimelineWriter<'_> {
    5679              :     type Target = Timeline;
    5680              : 
    5681      4807192 :     fn deref(&self) -> &Self::Target {
    5682      4807192 :         self.tl
    5683      4807192 :     }
    5684              : }
    5685              : 
    5686              : #[derive(PartialEq)]
    5687              : enum OpenLayerAction {
    5688              :     Roll,
    5689              :     Open,
    5690              :     None,
    5691              : }
    5692              : 
    5693              : impl<'a> TimelineWriter<'a> {
    5694      4804204 :     async fn handle_open_layer_action(
    5695      4804204 :         &mut self,
    5696      4804204 :         at: Lsn,
    5697      4804204 :         action: OpenLayerAction,
    5698      4804204 :         ctx: &RequestContext,
    5699      4804204 :     ) -> anyhow::Result<&Arc<InMemoryLayer>> {
    5700      4804204 :         match action {
    5701              :             OpenLayerAction::Roll => {
    5702           80 :                 let freeze_at = self.write_guard.as_ref().unwrap().max_lsn.unwrap();
    5703           80 :                 self.roll_layer(freeze_at).await?;
    5704           80 :                 self.open_layer(at, ctx).await?;
    5705              :             }
    5706         1188 :             OpenLayerAction::Open => self.open_layer(at, ctx).await?,
    5707              :             OpenLayerAction::None => {
    5708      4802936 :                 assert!(self.write_guard.is_some());
    5709              :             }
    5710              :         }
    5711              : 
    5712      4804204 :         Ok(&self.write_guard.as_ref().unwrap().open_layer)
    5713      4804204 :     }
    5714              : 
    5715         1268 :     async fn open_layer(&mut self, at: Lsn, ctx: &RequestContext) -> anyhow::Result<()> {
    5716         1268 :         let layer = self
    5717         1268 :             .tl
    5718         1268 :             .get_layer_for_write(at, &self.write_guard, ctx)
    5719          719 :             .await?;
    5720         1268 :         let initial_size = layer.size().await?;
    5721              : 
    5722         1268 :         let last_freeze_at = self.last_freeze_at.load();
    5723         1268 :         self.write_guard.replace(TimelineWriterState::new(
    5724         1268 :             layer,
    5725         1268 :             initial_size,
    5726         1268 :             last_freeze_at,
    5727         1268 :         ));
    5728         1268 : 
    5729         1268 :         Ok(())
    5730         1268 :     }
    5731              : 
    5732           80 :     async fn roll_layer(&mut self, freeze_at: Lsn) -> Result<(), FlushLayerError> {
    5733           80 :         let current_size = self.write_guard.as_ref().unwrap().current_size;
    5734           80 : 
    5735           80 :         // self.write_guard will be taken by the freezing
    5736           80 :         self.tl
    5737           80 :             .freeze_inmem_layer_at(freeze_at, &mut self.write_guard)
    5738            4 :             .await?;
    5739              : 
    5740           80 :         assert!(self.write_guard.is_none());
    5741              : 
    5742           80 :         if current_size >= self.get_checkpoint_distance() * 2 {
    5743            0 :             warn!("Flushed oversized open layer with size {}", current_size)
    5744           80 :         }
    5745              : 
    5746           80 :         Ok(())
    5747           80 :     }
    5748              : 
    5749      4804204 :     fn get_open_layer_action(&self, lsn: Lsn, new_value_size: u64) -> OpenLayerAction {
    5750      4804204 :         let state = &*self.write_guard;
    5751      4804204 :         let Some(state) = &state else {
    5752         1188 :             return OpenLayerAction::Open;
    5753              :         };
    5754              : 
    5755              :         #[cfg(feature = "testing")]
    5756      4803016 :         if state.cached_last_freeze_at < self.tl.last_freeze_at.load() {
    5757              :             // This check and assertion are not strictly needed, because
    5758              :             // LayerManager::try_freeze_in_memory_layer will always clear out the
    5759              :             // TimelineWriterState if something is frozen. However, last_freeze_at can
    5760              :             // advance while there is no TimelineWriterState.
    5761            0 :             assert!(
    5762            0 :                 state.open_layer.end_lsn.get().is_some(),
    5763            0 :                 "our open_layer must be outdated"
    5764              :             );
    5765              : 
    5766              :             // Holding on to a frozen layer would be a memory leak waiting to happen, because
    5767              :             // the in-memory layer always retains an index.
    5768            0 :             panic!("BUG: TimelineWriterState held on to frozen in-memory layer.");
    5769      4803016 :         }
    5770      4803016 : 
    5771      4803016 :         if state.prev_lsn == Some(lsn) {
    5772              :             // Rolling mid LSN is not supported by [downstream code].
    5773              :             // Hence, only roll at LSN boundaries.
    5774              :             //
    5775              :             // [downstream code]: https://github.com/neondatabase/neon/pull/7993#discussion_r1633345422
    5776            6 :             return OpenLayerAction::None;
    5777      4803010 :         }
    5778      4803010 : 
    5779      4803010 :         if state.current_size == 0 {
    5780              :             // Don't roll empty layers
    5781            0 :             return OpenLayerAction::None;
    5782      4803010 :         }
    5783      4803010 : 
    5784      4803010 :         if self.tl.should_roll(
    5785      4803010 :             state.current_size,
    5786      4803010 :             state.current_size + new_value_size,
    5787      4803010 :             self.get_checkpoint_distance(),
    5788      4803010 :             lsn,
    5789      4803010 :             state.cached_last_freeze_at,
    5790      4803010 :             state.open_layer.get_opened_at(),
    5791      4803010 :         ) {
    5792           80 :             OpenLayerAction::Roll
    5793              :         } else {
    5794      4802930 :             OpenLayerAction::None
    5795              :         }
    5796      4804204 :     }
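                      : 
                      :     // Summary of the decision order above: Open when no writer state exists yet;
                      :     // None when the batch ends at the same LSN as the previous one (rolling
                      :     // mid-LSN is unsupported) or when the open layer is still empty; otherwise
                      :     // Roll whenever `should_roll` (defined elsewhere on Timeline) decides the
                      :     // open layer has grown or aged enough.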
    5797              : 
    5798              :     /// Put a batch of keys at the specified LSNs.
    5799      4804202 :     pub(crate) async fn put_batch(
    5800      4804202 :         &mut self,
    5801      4804202 :         batch: SerializedValueBatch,
    5802      4804202 :         ctx: &RequestContext,
    5803      4804202 :     ) -> anyhow::Result<()> {
    5804      4804202 :         if batch.is_empty() {
    5805            0 :             return Ok(());
    5806      4804202 :         }
    5807      4804202 : 
    5808      4804202 :         let batch_max_lsn = batch.max_lsn;
    5809      4804202 :         let buf_size: u64 = batch.buffer_size() as u64;
    5810      4804202 : 
    5811      4804202 :         let action = self.get_open_layer_action(batch_max_lsn, buf_size);
    5812      4804202 :         let layer = self
    5813      4804202 :             .handle_open_layer_action(batch_max_lsn, action, ctx)
    5814          723 :             .await?;
    5815              : 
    5816      4804202 :         let res = layer.put_batch(batch, ctx).await;
    5817              : 
    5818      4804202 :         if res.is_ok() {
    5819      4804202 :             // Update the current size only when the entire write was ok.
    5820      4804202 :             // In case of failures, we may have had partial writes which
    5821      4804202 :             // render the size tracking out of sync. That's ok because
    5822      4804202 :             // the checkpoint distance should be significantly smaller
    5823      4804202 :             // than the S3 single shot upload limit of 5GiB.
    5824      4804202 :             let state = self.write_guard.as_mut().unwrap();
    5825      4804202 : 
    5826      4804202 :             state.current_size += buf_size;
    5827      4804202 :             state.prev_lsn = Some(batch_max_lsn);
    5828      4804202 :             state.max_lsn = std::cmp::max(state.max_lsn, Some(batch_max_lsn));
    5829      4804202 :         }
    5830              : 
    5831      4804202 :         res
    5832      4804202 :     }
    5833              : 
    5834              :     #[cfg(test)]
    5835              :     /// Test helper, for tests that would like to poke individual values without composing a batch
    5836      4390154 :     pub(crate) async fn put(
    5837      4390154 :         &mut self,
    5838      4390154 :         key: Key,
    5839      4390154 :         lsn: Lsn,
    5840      4390154 :         value: &Value,
    5841      4390154 :         ctx: &RequestContext,
    5842      4390154 :     ) -> anyhow::Result<()> {
    5843              :         use utils::bin_ser::BeSer;
    5844      4390154 :         if !key.is_valid_key_on_write_path() {
    5845            0 :             bail!(
    5846            0 :                 "the request contains data not supported by pageserver at TimelineWriter::put: {}",
    5847            0 :                 key
    5848            0 :             );
    5849      4390154 :         }
    5850      4390154 :         let val_ser_size = value.serialized_size().unwrap() as usize;
    5851      4390154 :         let batch = SerializedValueBatch::from_values(vec![(
    5852      4390154 :             key.to_compact(),
    5853      4390154 :             lsn,
    5854      4390154 :             val_ser_size,
    5855      4390154 :             value.clone(),
    5856      4390154 :         )]);
    5857      4390154 : 
    5858      4390154 :         self.put_batch(batch, ctx).await
    5859      4390154 :     }
    5860              : 
    5861            2 :     pub(crate) async fn delete_batch(
    5862            2 :         &mut self,
    5863            2 :         batch: &[(Range<Key>, Lsn)],
    5864            2 :         ctx: &RequestContext,
    5865            2 :     ) -> anyhow::Result<()> {
    5866            2 :         if let Some((_, lsn)) = batch.first() {
    5867            2 :             let action = self.get_open_layer_action(*lsn, 0);
    5868            2 :             let layer = self.handle_open_layer_action(*lsn, action, ctx).await?;
    5869            2 :             layer.put_tombstones(batch).await?;
    5870            0 :         }
    5871              : 
    5872            2 :         Ok(())
    5873            2 :     }
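                      : 
                      :     // Note: only the LSN of the first range in `batch` drives the open/roll
                      :     // decision above; the whole batch of tombstones then goes into that layer.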
    5874              : 
    5875              :     /// Track the end of the latest digested WAL record.
    5876              :     /// Remembers the (end of the) last valid WAL record in the timeline.
    5877              :     ///
    5878              :     /// Call this after you have finished writing all the WAL up to 'lsn'.
    5879              :     ///
    5880              :     /// 'lsn' must be aligned. This wakes up any wait_lsn() callers waiting for
    5881              :     /// the 'lsn' or anything older. The previous last record LSN is stored alongside
    5882              :     /// the latest and can be read.
    5883      5279064 :     pub(crate) fn finish_write(&self, new_lsn: Lsn) {
    5884      5279064 :         self.tl.finish_write(new_lsn);
    5885      5279064 :     }
    5886              : 
    5887       270570 :     pub(crate) fn update_current_logical_size(&self, delta: i64) {
    5888       270570 :         self.tl.update_current_logical_size(delta)
    5889       270570 :     }
    5890              : }
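                      : 
                      : // A hedged sketch of the typical write sequence against this guard, assuming an
                      : // accessor such as `Timeline::writer()` (defined elsewhere in this module) that
                      : // returns a `TimelineWriter`:
                      : //
                      : //     let mut writer = timeline.writer().await;
                      : //     writer.put_batch(batch, &ctx).await?; // may open or roll the in-memory layer
                      : //     writer.finish_write(end_lsn);         // wakes wait_lsn() callers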
    5891              : 
    5892              : // We need TimelineWriter to be Send in the upcoming conversion of
    5893              : // Timeline::layers to tokio::sync::RwLock.
    5894              : #[test]
    5895            2 : fn is_send() {
    5896            2 :     fn _assert_send<T: Send>() {}
    5897            2 :     _assert_send::<TimelineWriter<'_>>();
    5898            2 : }
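                      : 
                      : // The assertion above is purely a compile-time check: instantiating
                      : // `_assert_send::<TimelineWriter<'_>>` fails to build if the guard ever stops
                      : // being Send, while the test body does nothing at runtime.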
    5899              : 
    5900              : #[cfg(test)]
    5901              : mod tests {
    5902              :     use pageserver_api::key::Key;
    5903              :     use pageserver_api::value::Value;
    5904              :     use utils::{id::TimelineId, lsn::Lsn};
    5905              : 
    5906              :     use crate::tenant::{
    5907              :         harness::{test_img, TenantHarness},
    5908              :         layer_map::LayerMap,
    5909              :         storage_layer::{Layer, LayerName},
    5910              :         timeline::{DeltaLayerTestDesc, EvictionError},
    5911              :         Timeline,
    5912              :     };
    5913              : 
    5914              :     #[tokio::test]
    5915            2 :     async fn test_heatmap_generation() {
    5916            2 :         let harness = TenantHarness::create("heatmap_generation").await.unwrap();
    5917            2 : 
    5918            2 :         let covered_delta = DeltaLayerTestDesc::new_with_inferred_key_range(
    5919            2 :             Lsn(0x10)..Lsn(0x20),
    5920            2 :             vec![(
    5921            2 :                 Key::from_hex("620000000033333333444444445500000000").unwrap(),
    5922            2 :                 Lsn(0x11),
    5923            2 :                 Value::Image(test_img("foo")),
    5924            2 :             )],
    5925            2 :         );
    5926            2 :         let visible_delta = DeltaLayerTestDesc::new_with_inferred_key_range(
    5927            2 :             Lsn(0x10)..Lsn(0x20),
    5928            2 :             vec![(
    5929            2 :                 Key::from_hex("720000000033333333444444445500000000").unwrap(),
    5930            2 :                 Lsn(0x11),
    5931            2 :                 Value::Image(test_img("foo")),
    5932            2 :             )],
    5933            2 :         );
    5934            2 :         let l0_delta = DeltaLayerTestDesc::new(
    5935            2 :             Lsn(0x20)..Lsn(0x30),
    5936            2 :             Key::from_hex("000000000000000000000000000000000000").unwrap()
    5937            2 :                 ..Key::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(),
    5938            2 :             vec![(
    5939            2 :                 Key::from_hex("720000000033333333444444445500000000").unwrap(),
    5940            2 :                 Lsn(0x25),
    5941            2 :                 Value::Image(test_img("foo")),
    5942            2 :             )],
    5943            2 :         );
    5944            2 :         let delta_layers = vec![
    5945            2 :             covered_delta.clone(),
    5946            2 :             visible_delta.clone(),
    5947            2 :             l0_delta.clone(),
    5948            2 :         ];
    5949            2 : 
    5950            2 :         let image_layer = (
    5951            2 :             Lsn(0x40),
    5952            2 :             vec![(
    5953            2 :                 Key::from_hex("620000000033333333444444445500000000").unwrap(),
    5954            2 :                 test_img("bar"),
    5955            2 :             )],
    5956            2 :         );
    5957            2 :         let image_layers = vec![image_layer];
    5958            2 : 
    5959           20 :         let (tenant, ctx) = harness.load().await;
    5960            2 :         let timeline = tenant
    5961            2 :             .create_test_timeline_with_layers(
    5962            2 :                 TimelineId::generate(),
    5963            2 :                 Lsn(0x10),
    5964            2 :                 14,
    5965            2 :                 &ctx,
    5966            2 :                 delta_layers,
    5967            2 :                 image_layers,
    5968            2 :                 Lsn(0x100),
    5969            2 :             )
    5970           31 :             .await
    5971            2 :             .unwrap();
    5972            2 : 
    5973            2 :         // Layer visibility is an input to heatmap generation, so refresh it first
    5974            2 :         timeline.update_layer_visibility().await.unwrap();
    5975            2 : 
    5976            2 :         let heatmap = timeline
    5977            2 :             .generate_heatmap()
    5978            2 :             .await
    5979            2 :             .expect("Infallible while timeline is not shut down");
    5980            2 : 
    5981            2 :         assert_eq!(heatmap.timeline_id, timeline.timeline_id);
    5982            2 : 
    5983            2 :         // L0 should come last
    5984            2 :         assert_eq!(heatmap.layers.last().unwrap().name, l0_delta.layer_name());
    5985            2 : 
    5986            2 :         let mut last_lsn = Lsn::MAX;
    5987           10 :         for layer in heatmap.layers {
    5988            2 :             // Covered layer should be omitted
    5989            8 :             assert!(layer.name != covered_delta.layer_name());
    5990            2 : 
    5991            8 :             let layer_lsn = match &layer.name {
    5992            4 :                 LayerName::Delta(d) => d.lsn_range.end,
    5993            4 :                 LayerName::Image(i) => i.lsn,
    5994            2 :             };
    5995            2 : 
    5996            2 :             // Apart from L0s, the newest layers should come first
    5997            8 :             if !LayerMap::is_l0(layer.name.key_range(), layer.name.is_delta()) {
    5998            6 :                 assert!(layer_lsn <= last_lsn);
    5999            6 :                 last_lsn = layer_lsn;
    6000            2 :             }
    6001            2 :         }
    6002            2 :     }
    6003              : 
    6004              :     #[tokio::test]
    6005            2 :     async fn two_layer_eviction_attempts_at_the_same_time() {
    6006            2 :         let harness = TenantHarness::create("two_layer_eviction_attempts_at_the_same_time")
    6007            2 :             .await
    6008            2 :             .unwrap();
    6009            2 : 
    6010           20 :         let (tenant, ctx) = harness.load().await;
    6011            2 :         let timeline = tenant
    6012            2 :             .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx)
    6013            6 :             .await
    6014            2 :             .unwrap();
    6015            2 : 
    6016            2 :         let layer = find_some_layer(&timeline).await;
    6017            2 :         let layer = layer
    6018            2 :             .keep_resident()
    6019            2 :             .await
    6020            2 :             .expect("no download => no downloading errors")
    6021            2 :             .drop_eviction_guard();
    6022            2 : 
    6023            2 :         let forever = std::time::Duration::from_secs(120);
    6024            2 : 
    6025            2 :         let first = layer.evict_and_wait(forever);
    6026            2 :         let second = layer.evict_and_wait(forever);
    6027            2 : 
    6028            2 :         let (first, second) = tokio::join!(first, second);
    6029            2 : 
    6030            2 :         let res = layer.keep_resident().await;
    6031            2 :         assert!(res.is_none(), "{res:?}");
    6032            2 : 
    6033            2 :         match (first, second) {
    6034            2 :             (Ok(()), Ok(())) => {
    6035            2 :                 // because there are no more timeline locks being taken on eviction path, we can
    6036            2 :                 // Because there are no more timeline locks being taken on the eviction path,
    6037            2 :                 // we can witness all three outcomes here.
    6038            2 :             (Ok(()), Err(EvictionError::NotFound)) | (Err(EvictionError::NotFound), Ok(())) => {
    6039            0 :                 // If one completes before the other, that is fine as well.
    6040            0 :             }
    6041            2 :             other => unreachable!("unexpected {:?}", other),
    6042            2 :         }
    6043            2 :     }
    6044              : 
    6045            2 :     async fn find_some_layer(timeline: &Timeline) -> Layer {
    6046            2 :         let layers = timeline.layers.read().await;
    6047            2 :         let desc = layers
    6048            2 :             .layer_map()
    6049            2 :             .unwrap()
    6050            2 :             .iter_historic_layers()
    6051            2 :             .next()
    6052            2 :             .expect("must find one layer to evict");
    6053            2 : 
    6054            2 :         layers.get_from_desc(&desc)
    6055            2 :     }
    6056              : }
        

Generated by: LCOV version 2.1-beta