LCOV - code coverage report
Current view: top level - pageserver/src/tenant - timeline.rs (source / functions) Coverage Total Hit
Test: 1e20c4f2b28aa592527961bb32170ebbd2c9172f.info Lines: 62.4 % 4494 2804
Test Date: 2025-07-16 12:29:03 Functions: 58.0 % 402 233

            Line data    Source code
       1              : pub(crate) mod analysis;
       2              : pub(crate) mod compaction;
       3              : pub mod delete;
       4              : pub(crate) mod detach_ancestor;
       5              : mod eviction_task;
       6              : pub(crate) mod handle;
       7              : mod heatmap_layers_downloader;
       8              : pub(crate) mod import_pgdata;
       9              : mod init;
      10              : pub mod layer_manager;
      11              : pub(crate) mod logical_size;
      12              : pub mod offload;
      13              : pub mod span;
      14              : pub mod uninit;
      15              : mod walreceiver;
      16              : 
      17              : use hashlink::LruCache;
      18              : use std::array;
      19              : use std::cmp::{max, min};
      20              : use std::collections::btree_map::Entry;
      21              : use std::collections::{BTreeMap, HashMap, HashSet};
      22              : use std::ops::{ControlFlow, Deref, Range};
      23              : use std::sync::atomic::{AtomicBool, AtomicU64, Ordering as AtomicOrdering};
      24              : use std::sync::{Arc, Mutex, OnceLock, RwLock, Weak};
      25              : use std::time::{Duration, Instant, SystemTime};
      26              : 
      27              : use anyhow::{Context, Result, anyhow, bail, ensure};
      28              : use arc_swap::{ArcSwap, ArcSwapOption};
      29              : use bytes::Bytes;
      30              : use camino::Utf8Path;
      31              : use chrono::{DateTime, Utc};
      32              : use compaction::{CompactionOutcome, GcCompactionCombinedSettings};
      33              : use enumset::EnumSet;
      34              : use fail::fail_point;
      35              : use futures::stream::FuturesUnordered;
      36              : use futures::{FutureExt, StreamExt};
      37              : use handle::ShardTimelineId;
      38              : use layer_manager::{
      39              :     LayerManagerLockHolder, LayerManagerReadGuard, LayerManagerWriteGuard, LockedLayerManager,
      40              :     Shutdown,
      41              : };
      42              : 
      43              : use once_cell::sync::Lazy;
      44              : use pageserver_api::config::tenant_conf_defaults::DEFAULT_PITR_INTERVAL;
      45              : use pageserver_api::key::{
      46              :     KEY_SIZE, Key, METADATA_KEY_BEGIN_PREFIX, METADATA_KEY_END_PREFIX, NON_INHERITED_RANGE,
      47              :     SPARSE_RANGE,
      48              : };
      49              : use pageserver_api::keyspace::{KeySpaceAccum, KeySpaceRandomAccum, SparseKeyPartitioning};
      50              : use pageserver_api::models::{
      51              :     CompactKeyRange, CompactLsnRange, CompactionAlgorithm, CompactionAlgorithmSettings,
      52              :     DetachBehavior, DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskSpawnRequest,
      53              :     EvictionPolicy, InMemoryLayerInfo, LayerMapInfo, LsnLease, PageTraceEvent, RelSizeMigration,
      54              :     TimelineState,
      55              : };
      56              : use pageserver_api::reltag::{BlockNumber, RelTag};
      57              : use pageserver_api::shard::{ShardIdentity, ShardIndex, ShardNumber, TenantShardId};
      58              : use postgres_connection::PgConnectionConfig;
      59              : use postgres_ffi::v14::xlog_utils;
      60              : use postgres_ffi::{PgMajorVersion, WAL_SEGMENT_SIZE, to_pg_timestamp};
      61              : use rand::Rng;
      62              : use remote_storage::DownloadError;
      63              : use serde_with::serde_as;
      64              : use storage_broker::BrokerClientChannel;
      65              : use tokio::runtime::Handle;
      66              : use tokio::sync::mpsc::Sender;
      67              : use tokio::sync::{Notify, oneshot, watch};
      68              : use tokio_util::sync::CancellationToken;
      69              : use tracing::*;
      70              : use utils::generation::Generation;
      71              : use utils::guard_arc_swap::GuardArcSwap;
      72              : use utils::id::TimelineId;
      73              : use utils::logging::{MonitorSlowFutureCallback, monitor_slow_future};
      74              : use utils::lsn::{AtomicLsn, Lsn, RecordLsn};
      75              : use utils::postgres_client::PostgresClientProtocol;
      76              : use utils::rate_limit::RateLimit;
      77              : use utils::seqwait::SeqWait;
      78              : use utils::simple_rcu::{Rcu, RcuReadGuard};
      79              : use utils::sync::gate::{Gate, GateGuard};
      80              : use utils::{completion, critical_timeline, fs_ext, pausable_failpoint};
      81              : #[cfg(test)]
      82              : use wal_decoder::models::value::Value;
      83              : use wal_decoder::serialized_batch::{SerializedValueBatch, ValueMeta};
      84              : 
      85              : use self::delete::DeleteTimelineFlow;
      86              : pub(super) use self::eviction_task::EvictionTaskTenantState;
      87              : use self::eviction_task::EvictionTaskTimelineState;
      88              : use self::logical_size::LogicalSize;
      89              : use self::walreceiver::{WalReceiver, WalReceiverConf};
      90              : use super::remote_timeline_client::RemoteTimelineClient;
      91              : use super::remote_timeline_client::index::{GcCompactionState, IndexPart};
      92              : use super::secondary::heatmap::HeatMapLayer;
      93              : use super::storage_layer::{LayerFringe, LayerVisibilityHint, ReadableLayer};
      94              : use super::tasks::log_compaction_error;
      95              : use super::upload_queue::NotInitialized;
      96              : use super::{
      97              :     AttachedTenantConf, GcError, HeatMapTimeline, MaybeOffloaded,
      98              :     debug_assert_current_span_has_tenant_and_timeline_id,
      99              : };
     100              : use crate::PERF_TRACE_TARGET;
     101              : use crate::aux_file::AuxFileSizeEstimator;
     102              : use crate::basebackup_cache::BasebackupCache;
     103              : use crate::config::PageServerConf;
     104              : use crate::context::{
     105              :     DownloadBehavior, PerfInstrumentFutureExt, RequestContext, RequestContextBuilder,
     106              : };
     107              : use crate::disk_usage_eviction_task::{DiskUsageEvictionInfo, EvictionCandidate, finite_f32};
     108              : use crate::feature_resolver::TenantFeatureResolver;
     109              : use crate::keyspace::{KeyPartitioning, KeySpace};
     110              : use crate::l0_flush::{self, L0FlushGlobalState};
     111              : use crate::metrics::{
     112              :     DELTAS_PER_READ_GLOBAL, LAYERS_PER_READ_AMORTIZED_GLOBAL, LAYERS_PER_READ_BATCH_GLOBAL,
     113              :     LAYERS_PER_READ_GLOBAL, ScanLatencyOngoingRecording, TimelineMetrics,
     114              : };
     115              : use crate::page_service::TenantManagerTypes;
     116              : use crate::pgdatadir_mapping::{
     117              :     CalculateLogicalSizeError, CollectKeySpaceError, DirectoryKind, LsnForTimestamp,
     118              :     MAX_AUX_FILE_V2_DELTAS, MetricsUpdate,
     119              : };
     120              : use crate::task_mgr::TaskKind;
     121              : use crate::tenant::gc_result::GcResult;
     122              : use crate::tenant::layer_map::LayerMap;
     123              : use crate::tenant::metadata::TimelineMetadata;
     124              : use crate::tenant::storage_layer::delta_layer::DeltaEntry;
     125              : use crate::tenant::storage_layer::inmemory_layer::IndexEntry;
     126              : use crate::tenant::storage_layer::{
     127              :     AsLayerDesc, BatchLayerWriter, DeltaLayerWriter, EvictionError, ImageLayerName,
     128              :     ImageLayerWriter, InMemoryLayer, IoConcurrency, Layer, LayerAccessStatsReset, LayerName,
     129              :     PersistentLayerDesc, PersistentLayerKey, ResidentLayer, ValueReconstructSituation,
     130              :     ValueReconstructState, ValuesReconstructState,
     131              : };
     132              : use crate::tenant::tasks::BackgroundLoopKind;
     133              : use crate::tenant::timeline::logical_size::CurrentLogicalSize;
     134              : use crate::virtual_file::{MaybeFatalIo, VirtualFile};
     135              : use crate::walingest::WalLagCooldown;
     136              : use crate::walredo::RedoAttemptType;
     137              : use crate::{ZERO_PAGE, task_mgr, walredo};
     138              : 
     139              : #[derive(Debug, PartialEq, Eq, Clone, Copy)]
     140              : pub(crate) enum FlushLoopState {
     141              :     NotStarted,
     142              :     Running {
     143              :         #[cfg(test)]
     144              :         expect_initdb_optimization: bool,
     145              :         #[cfg(test)]
     146              :         initdb_optimization_count: usize,
     147              :     },
     148              :     Exited,
     149              : }
     150              : 
     151              : #[derive(Debug, Copy, Clone, PartialEq, Eq)]
     152              : pub enum ImageLayerCreationMode {
     153              :     /// Try to create image layers based on `time_for_new_image_layer`. Used in compaction code path.
     154              :     Try,
     155              :     /// Force creating the image layers if possible. For now, no image layers will be created
     156              :     /// for metadata keys. Used in compaction code path with force flag enabled.
     157              :     Force,
     158              :     /// Initial ingestion of the data, and no data should be dropped in this function. This
     159              :     /// means that no metadata keys should be included in the partitions. Used in flush frozen layer
     160              :     /// code path.
     161              :     Initial,
     162              : }
     163              : 
     164              : #[derive(Clone, Debug, Default)]
     165              : pub enum LastImageLayerCreationStatus {
     166              :     Incomplete {
     167              :         /// The last key of the partition (exclusive) that was processed in the last
     168              :         /// image layer creation attempt. We will continue from this key in the next
     169              :         /// attempt.
     170              :         last_key: Key,
     171              :     },
     172              :     Complete,
     173              :     #[default]
     174              :     Initial,
     175              : }
     176              : 
     177              : impl std::fmt::Display for ImageLayerCreationMode {
     178          191 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
     179          191 :         write!(f, "{self:?}")
     180          191 :     }
     181              : }
     182              : 
     183              : /// Temporary function for immutable storage state refactor, ensures we are dropping mutex guard instead of other things.
     184              : /// Can be removed after all refactors are done.
     185           23 : fn drop_layer_manager_rlock(rlock: LayerManagerReadGuard<'_>) {
     186           23 :     drop(rlock)
     187           23 : }
     188              : 
     189              : /// Temporary function for immutable storage state refactor, ensures we are dropping mutex guard instead of other things.
     190              : /// Can be removed after all refactors are done.
     191          214 : fn drop_layer_manager_wlock(rlock: LayerManagerWriteGuard<'_>) {
     192          214 :     drop(rlock)
     193          214 : }
     194              : 
     195              : /// The outward-facing resources required to build a Timeline
     196              : pub struct TimelineResources {
     197              :     pub remote_client: RemoteTimelineClient,
     198              :     pub pagestream_throttle: Arc<crate::tenant::throttle::Throttle>,
     199              :     pub pagestream_throttle_metrics: Arc<crate::metrics::tenant_throttling::Pagestream>,
     200              :     pub l0_compaction_trigger: Arc<Notify>,
     201              :     pub l0_flush_global_state: l0_flush::L0FlushGlobalState,
     202              :     pub basebackup_cache: Arc<BasebackupCache>,
     203              :     pub feature_resolver: Arc<TenantFeatureResolver>,
     204              : }
     205              : 
     206              : pub struct Timeline {
     207              :     pub(crate) conf: &'static PageServerConf,
     208              :     tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
     209              : 
     210              :     myself: Weak<Self>,
     211              : 
     212              :     pub(crate) tenant_shard_id: TenantShardId,
     213              :     pub timeline_id: TimelineId,
     214              : 
     215              :     /// The generation of the tenant that instantiated us: this is used for safety when writing remote objects.
     216              :     /// Never changes for the lifetime of this [`Timeline`] object.
     217              :     ///
     218              :     /// This duplicates the generation stored in LocationConf, but that structure is mutable:
     219              :     /// this copy enforces the invariant that generatio doesn't change during a Tenant's lifetime.
     220              :     pub(crate) generation: Generation,
     221              : 
     222              :     /// The detailed sharding information from our parent Tenant.  This enables us to map keys
     223              :     /// to shards, and is constant through the lifetime of this Timeline.
     224              :     shard_identity: ShardIdentity,
     225              : 
     226              :     pub pg_version: PgMajorVersion,
     227              : 
     228              :     /// The tuple has two elements.
     229              :     /// 1. `LayerFileManager` keeps track of the various physical representations of the layer files (inmem, local, remote).
     230              :     /// 2. `LayerMap`, the acceleration data structure for `get_reconstruct_data`.
     231              :     ///
     232              :     /// `LayerMap` maps out the `(PAGE,LSN) / (KEY,LSN)` space, which is composed of `(KeyRange, LsnRange)` rectangles.
     233              :     /// We describe these rectangles through the `PersistentLayerDesc` struct.
     234              :     ///
     235              :     /// When we want to reconstruct a page, we first find the `PersistentLayerDesc`'s that we need for page reconstruction,
     236              :     /// using `LayerMap`. Then, we use `LayerFileManager` to get the `PersistentLayer`'s that correspond to the
     237              :     /// `PersistentLayerDesc`'s.
     238              :     ///
     239              :     /// Hence, it's important to keep things coherent. The `LayerFileManager` must always have an entry for all
     240              :     /// `PersistentLayerDesc`'s in the `LayerMap`. If it doesn't, `LayerFileManager::get_from_desc` will panic at
     241              :     /// runtime, e.g., during page reconstruction.
     242              :     ///
     243              :     /// In the future, we'll be able to split up the tuple of LayerMap and `LayerFileManager`,
     244              :     /// so that e.g. on-demand-download/eviction, and layer spreading, can operate just on `LayerFileManager`.
     245              :     pub(crate) layers: LockedLayerManager,
     246              : 
     247              :     last_freeze_at: AtomicLsn,
     248              :     // Atomic would be more appropriate here.
     249              :     last_freeze_ts: RwLock<Instant>,
     250              : 
     251              :     pub(crate) standby_horizon: AtomicLsn,
     252              : 
     253              :     // WAL redo manager. `None` only for broken tenants.
     254              :     walredo_mgr: Option<Arc<super::WalRedoManager>>,
     255              : 
     256              :     /// Remote storage client.
     257              :     /// See [`remote_timeline_client`](super::remote_timeline_client) module comment for details.
     258              :     pub(crate) remote_client: Arc<RemoteTimelineClient>,
     259              : 
     260              :     // What page versions do we hold in the repository? If we get a
     261              :     // request > last_record_lsn, we need to wait until we receive all
     262              :     // the WAL up to the request. The SeqWait provides functions for
     263              :     // that. TODO: If we get a request for an old LSN, such that the
     264              :     // versions have already been garbage collected away, we should
     265              :     // throw an error, but we don't track that currently.
     266              :     //
     267              :     // last_record_lsn.load().last points to the end of last processed WAL record.
     268              :     //
     269              :     // We also remember the starting point of the previous record in
     270              :     // 'last_record_lsn.load().prev'. It's used to set the xl_prev pointer of the
     271              :     // first WAL record when the node is started up. But here, we just
     272              :     // keep track of it.
     273              :     last_record_lsn: SeqWait<RecordLsn, Lsn>,
     274              : 
     275              :     // All WAL records have been processed and stored durably on files on
     276              :     // local disk, up to this LSN. On crash and restart, we need to re-process
     277              :     // the WAL starting from this point.
     278              :     //
     279              :     // Some later WAL records might have been processed and also flushed to disk
     280              :     // already, so don't be surprised to see some, but there's no guarantee on
     281              :     // them yet.
     282              :     disk_consistent_lsn: AtomicLsn,
     283              : 
     284              :     // Parent timeline that this timeline was branched from, and the LSN
     285              :     // of the branch point.
     286              :     ancestor_timeline: Option<Arc<Timeline>>,
     287              :     ancestor_lsn: Lsn,
     288              : 
     289              :     // The LSN of gc-compaction that was last applied to this timeline.
     290              :     gc_compaction_state: ArcSwap<Option<GcCompactionState>>,
     291              : 
     292              :     pub(crate) metrics: Arc<TimelineMetrics>,
     293              : 
     294              :     // `Timeline` doesn't write these metrics itself, but it manages the lifetime.  Code
     295              :     // in `crate::page_service` writes these metrics.
     296              :     pub(crate) query_metrics: crate::metrics::SmgrQueryTimePerTimeline,
     297              : 
     298              :     directory_metrics_inited: [AtomicBool; DirectoryKind::KINDS_NUM],
     299              :     directory_metrics: [AtomicU64; DirectoryKind::KINDS_NUM],
     300              : 
     301              :     /// Ensures layers aren't frozen by checkpointer between
     302              :     /// [`Timeline::get_layer_for_write`] and layer reads.
     303              :     /// Locked automatically by [`TimelineWriter`] and checkpointer.
     304              :     /// Must always be acquired before the layer map/individual layer lock
     305              :     /// to avoid deadlock.
     306              :     ///
     307              :     /// The state is cleared upon freezing.
     308              :     write_lock: tokio::sync::Mutex<Option<TimelineWriterState>>,
     309              : 
     310              :     /// Used to avoid multiple `flush_loop` tasks running
     311              :     pub(super) flush_loop_state: Mutex<FlushLoopState>,
     312              : 
     313              :     /// layer_flush_start_tx can be used to wake up the layer-flushing task.
     314              :     /// - The u64 value is a counter, incremented every time a new flush cycle is requested.
     315              :     ///   The flush cycle counter is sent back on the layer_flush_done channel when
     316              :     ///   the flush finishes. You can use that to wait for the flush to finish.
     317              :     /// - The LSN is updated to max() of its current value and the latest disk_consistent_lsn
     318              :     ///   read by whoever sends an update
     319              :     layer_flush_start_tx: tokio::sync::watch::Sender<(u64, Lsn)>,
     320              :     /// to be notified when layer flushing has finished, subscribe to the layer_flush_done channel
     321              :     layer_flush_done_tx: tokio::sync::watch::Sender<(u64, Result<(), FlushLayerError>)>,
     322              : 
     323              :     // The LSN at which we have executed GC: whereas [`Self::gc_info`] records the LSN at which
     324              :     // we _intend_ to GC (i.e. the PITR cutoff), this LSN records where we actually last did it.
     325              :     // Because PITR interval is mutable, it's possible for this LSN to be earlier or later than
     326              :     // the planned GC cutoff.
     327              :     pub applied_gc_cutoff_lsn: Rcu<Lsn>,
     328              : 
     329              :     pub(crate) gc_compaction_layer_update_lock: tokio::sync::RwLock<()>,
     330              : 
     331              :     // List of child timelines and their branch points. This is needed to avoid
     332              :     // garbage collecting data that is still needed by the child timelines.
     333              :     pub(crate) gc_info: std::sync::RwLock<GcInfo>,
     334              : 
     335              :     pub(crate) last_image_layer_creation_status: ArcSwap<LastImageLayerCreationStatus>,
     336              : 
     337              :     // It may change across major versions so for simplicity
     338              :     // keep it after running initdb for a timeline.
     339              :     // It is needed in checks when we want to error on some operations
     340              :     // when they are requested for pre-initdb lsn.
     341              :     // It can be unified with latest_gc_cutoff_lsn under some "first_valid_lsn",
     342              :     // though let's keep them both for better error visibility.
     343              :     pub initdb_lsn: Lsn,
     344              : 
     345              :     /// The repartitioning result. Allows a single writer and multiple readers.
     346              :     pub(crate) partitioning: GuardArcSwap<((KeyPartitioning, SparseKeyPartitioning), Lsn)>,
     347              : 
     348              :     /// Configuration: how often should the partitioning be recalculated.
     349              :     repartition_threshold: u64,
     350              : 
     351              :     last_image_layer_creation_check_at: AtomicLsn,
     352              :     last_image_layer_creation_check_instant: std::sync::Mutex<Option<Instant>>,
     353              : 
     354              :     /// Current logical size of the "datadir", at the last LSN.
     355              :     current_logical_size: LogicalSize,
     356              : 
     357              :     /// Information about the last processed message by the WAL receiver,
     358              :     /// or None if WAL receiver has not received anything for this timeline
     359              :     /// yet.
     360              :     pub last_received_wal: Mutex<Option<WalReceiverInfo>>,
     361              :     pub walreceiver: Mutex<Option<WalReceiver>>,
     362              : 
     363              :     /// Relation size cache
     364              :     pub(crate) rel_size_latest_cache: RwLock<HashMap<RelTag, (Lsn, BlockNumber)>>,
     365              :     pub(crate) rel_size_snapshot_cache: Mutex<LruCache<(Lsn, RelTag), BlockNumber>>,
     366              : 
     367              :     download_all_remote_layers_task_info: RwLock<Option<DownloadRemoteLayersTaskInfo>>,
     368              : 
     369              :     state: watch::Sender<TimelineState>,
     370              : 
     371              :     /// Prevent two tasks from deleting the timeline at the same time. If held, the
     372              :     /// timeline is being deleted. If 'true', the timeline has already been deleted.
     373              :     pub delete_progress: TimelineDeleteProgress,
     374              : 
     375              :     eviction_task_timeline_state: tokio::sync::Mutex<EvictionTaskTimelineState>,
     376              : 
     377              :     /// Load or creation time information about the disk_consistent_lsn and when the loading
     378              :     /// happened. Used for consumption metrics.
     379              :     pub(crate) loaded_at: (Lsn, SystemTime),
     380              : 
     381              :     /// Gate to prevent shutdown completing while I/O is still happening to this timeline's data
     382              :     pub(crate) gate: Gate,
     383              : 
     384              :     /// Cancellation token scoped to this timeline: anything doing long-running work relating
     385              :     /// to the timeline should drop out when this token fires.
     386              :     pub(crate) cancel: CancellationToken,
     387              : 
     388              :     /// Make sure we only have one running compaction at a time in tests.
     389              :     ///
     390              :     /// Must only be taken in two places:
     391              :     /// - [`Timeline::compact`] (this file)
     392              :     /// - [`delete::delete_local_timeline_directory`]
     393              :     ///
     394              :     /// Timeline deletion will acquire both compaction and gc locks in whatever order.
     395              :     compaction_lock: tokio::sync::Mutex<()>,
     396              : 
     397              :     /// If true, the last compaction failed.
     398              :     compaction_failed: AtomicBool,
     399              : 
     400              :     /// Notifies the tenant compaction loop that there is pending L0 compaction work.
     401              :     l0_compaction_trigger: Arc<Notify>,
     402              : 
     403              :     /// Make sure we only have one running gc at a time.
     404              :     ///
     405              :     /// Must only be taken in two places:
     406              :     /// - [`Timeline::gc`] (this file)
     407              :     /// - [`delete::delete_local_timeline_directory`]
     408              :     ///
     409              :     /// Timeline deletion will acquire both compaction and gc locks in whatever order.
     410              :     gc_lock: tokio::sync::Mutex<()>,
     411              : 
     412              :     /// Cloned from [`super::TenantShard::pagestream_throttle`] on construction.
     413              :     pub(crate) pagestream_throttle: Arc<crate::tenant::throttle::Throttle>,
     414              : 
     415              :     /// Size estimator for aux file v2
     416              :     pub(crate) aux_file_size_estimator: AuxFileSizeEstimator,
     417              : 
     418              :     /// Some test cases directly place keys into the timeline without actually modifying the directory
     419              :     /// keys (i.e., DB_DIR). The test cases creating such keys will put the keyspaces here, so that
     420              :     /// these keys won't get garbage-collected during compaction/GC. This field only modifies the dense
     421              :     /// keyspace return value of `collect_keyspace`. For sparse keyspaces, use AUX keys for testing, and
     422              :     /// in the future, add `extra_test_sparse_keyspace` if necessary.
     423              :     #[cfg(test)]
     424              :     pub(crate) extra_test_dense_keyspace: ArcSwap<KeySpace>,
     425              : 
     426              :     pub(crate) l0_flush_global_state: L0FlushGlobalState,
     427              : 
     428              :     pub(crate) handles: handle::PerTimelineState<TenantManagerTypes>,
     429              : 
     430              :     pub(crate) attach_wal_lag_cooldown: Arc<OnceLock<WalLagCooldown>>,
     431              : 
     432              :     /// Cf. [`crate::tenant::CreateTimelineIdempotency`].
     433              :     pub(crate) create_idempotency: crate::tenant::CreateTimelineIdempotency,
     434              : 
     435              :     /// If Some, collects GetPage metadata for an ongoing PageTrace.
     436              :     pub(crate) page_trace: ArcSwapOption<Sender<PageTraceEvent>>,
     437              : 
     438              :     pub(super) previous_heatmap: ArcSwapOption<PreviousHeatmap>,
     439              : 
     440              :     /// May host a background Tokio task which downloads all the layers from the current
     441              :     /// heatmap on demand.
     442              :     heatmap_layers_downloader: Mutex<Option<heatmap_layers_downloader::HeatmapLayersDownloader>>,
     443              : 
     444              :     pub(crate) rel_size_v2_status: ArcSwapOption<RelSizeMigration>,
     445              : 
     446              :     wait_lsn_log_slow: tokio::sync::Semaphore,
     447              : 
     448              :     /// A channel to send async requests to prepare a basebackup for the basebackup cache.
     449              :     basebackup_cache: Arc<BasebackupCache>,
     450              : 
     451              :     feature_resolver: Arc<TenantFeatureResolver>,
     452              : }
     453              : 
     454              : pub(crate) enum PreviousHeatmap {
     455              :     Active {
     456              :         heatmap: HeatMapTimeline,
     457              :         read_at: std::time::Instant,
     458              :         // End LSN covered by the heatmap if known
     459              :         end_lsn: Option<Lsn>,
     460              :     },
     461              :     Obsolete,
     462              : }
     463              : 
     464              : pub type TimelineDeleteProgress = Arc<tokio::sync::Mutex<DeleteTimelineFlow>>;
     465              : 
     466              : pub struct WalReceiverInfo {
     467              :     pub wal_source_connconf: PgConnectionConfig,
     468              :     pub last_received_msg_lsn: Lsn,
     469              :     pub last_received_msg_ts: u128,
     470              : }
     471              : 
     472              : /// Information about how much history needs to be retained, needed by
     473              : /// Garbage Collection.
     474              : #[derive(Default)]
     475              : pub(crate) struct GcInfo {
     476              :     /// Specific LSNs that are needed.
     477              :     ///
     478              :     /// Currently, this includes all points where child branches have
     479              :     /// been forked off from. In the future, could also include
     480              :     /// explicit user-defined snapshot points.
     481              :     pub(crate) retain_lsns: Vec<(Lsn, TimelineId, MaybeOffloaded)>,
     482              : 
     483              :     /// The cutoff coordinates, which are combined by selecting the minimum.
     484              :     pub(crate) cutoffs: GcCutoffs,
     485              : 
     486              :     /// Leases granted to particular LSNs.
     487              :     pub(crate) leases: BTreeMap<Lsn, LsnLease>,
     488              : 
     489              :     /// Whether our branch point is within our ancestor's PITR interval (for cost estimation)
     490              :     pub(crate) within_ancestor_pitr: bool,
     491              : }
     492              : 
     493              : impl GcInfo {
     494          154 :     pub(crate) fn min_cutoff(&self) -> Lsn {
     495          154 :         self.cutoffs.select_min()
     496          154 :     }
     497              : 
     498          119 :     pub(super) fn insert_child(
     499          119 :         &mut self,
     500          119 :         child_id: TimelineId,
     501          119 :         child_lsn: Lsn,
     502          119 :         is_offloaded: MaybeOffloaded,
     503          119 :     ) {
     504          119 :         self.retain_lsns.push((child_lsn, child_id, is_offloaded));
     505          119 :         self.retain_lsns.sort_by_key(|i| i.0);
     506          119 :     }
     507              : 
     508            2 :     pub(super) fn remove_child_maybe_offloaded(
     509            2 :         &mut self,
     510            2 :         child_id: TimelineId,
     511            2 :         maybe_offloaded: MaybeOffloaded,
     512            2 :     ) -> bool {
     513              :         // Remove at most one element. Needed for correctness if there is two live `Timeline` objects referencing
     514              :         // the same timeline. Shouldn't but maybe can occur when Arc's live longer than intended.
     515            2 :         let mut removed = false;
     516            3 :         self.retain_lsns.retain(|i| {
     517            3 :             if removed {
     518            1 :                 return true;
     519            2 :             }
     520            2 :             let remove = i.1 == child_id && i.2 == maybe_offloaded;
     521            2 :             removed |= remove;
     522            2 :             !remove
     523            3 :         });
     524            2 :         removed
     525            2 :     }
     526              : 
     527            2 :     pub(super) fn remove_child_not_offloaded(&mut self, child_id: TimelineId) -> bool {
     528            2 :         self.remove_child_maybe_offloaded(child_id, MaybeOffloaded::No)
     529            2 :     }
     530              : 
     531            0 :     pub(super) fn remove_child_offloaded(&mut self, child_id: TimelineId) -> bool {
     532            0 :         self.remove_child_maybe_offloaded(child_id, MaybeOffloaded::Yes)
     533            0 :     }
     534          119 :     pub(crate) fn lsn_covered_by_lease(&self, lsn: Lsn) -> bool {
     535          119 :         self.leases.contains_key(&lsn)
     536          119 :     }
     537              : }
     538              : 
     539              : /// The `GcInfo` component describing which Lsns need to be retained.  Functionally, this
     540              : /// is a single number (the oldest LSN which we must retain), but it internally distinguishes
     541              : /// between time-based and space-based retention for observability and consumption metrics purposes.
     542              : #[derive(Clone, Debug, Default)]
     543              : pub(crate) struct GcCutoffs {
     544              :     /// Calculated from the [`pageserver_api::models::TenantConfig::gc_horizon`], this LSN indicates how much
     545              :     /// history we must keep to retain a specified number of bytes of WAL.
     546              :     pub(crate) space: Lsn,
     547              : 
     548              :     /// Calculated from [`pageserver_api::models::TenantConfig::pitr_interval`], this LSN indicates
     549              :     /// how much history we must keep to enable reading back at least the PITR interval duration.
     550              :     ///
     551              :     /// None indicates that the PITR cutoff has not been computed. A PITR interval of 0 will yield
     552              :     /// Some(last_record_lsn).
     553              :     pub(crate) time: Option<Lsn>,
     554              : }
     555              : 
     556              : impl GcCutoffs {
     557          154 :     fn select_min(&self) -> Lsn {
     558              :         // NB: if we haven't computed the PITR cutoff yet, we can't GC anything.
     559          154 :         self.space.min(self.time.unwrap_or_default())
     560          154 :     }
     561              : }
     562              : 
     563              : pub(crate) struct TimelineVisitOutcome {
     564              :     completed_keyspace: KeySpace,
     565              :     image_covered_keyspace: KeySpace,
     566              : }
     567              : 
     568              : /// An error happened in a get() operation.
     569              : #[derive(thiserror::Error, Debug)]
     570              : pub(crate) enum PageReconstructError {
     571              :     #[error(transparent)]
     572              :     Other(anyhow::Error),
     573              : 
     574              :     #[error("Ancestor LSN wait error: {0}")]
     575              :     AncestorLsnTimeout(WaitLsnError),
     576              : 
     577              :     #[error("timeline shutting down")]
     578              :     Cancelled,
     579              : 
     580              :     /// An error happened replaying WAL records
     581              :     #[error(transparent)]
     582              :     WalRedo(anyhow::Error),
     583              : 
     584              :     #[error("{0}")]
     585              :     MissingKey(Box<MissingKeyError>),
     586              : }
     587              : 
     588              : impl PageReconstructError {
     589            0 :     pub(crate) fn is_cancel(&self) -> bool {
     590            0 :         match self {
     591            0 :             PageReconstructError::Other(_) => false,
     592            0 :             PageReconstructError::AncestorLsnTimeout(e) => e.is_cancel(),
     593            0 :             PageReconstructError::Cancelled => true,
     594            0 :             PageReconstructError::WalRedo(_) => false,
     595            0 :             PageReconstructError::MissingKey(_) => false,
     596              :         }
     597            0 :     }
     598              :     #[allow(dead_code)] // we use the is_cancel + into_anyhow pattern in quite a few places, this one will follow soon enough
     599            0 :     pub(crate) fn into_anyhow(self) -> anyhow::Error {
     600            0 :         match self {
     601            0 :             PageReconstructError::Other(e) => e,
     602            0 :             PageReconstructError::AncestorLsnTimeout(e) => e.into_anyhow(),
     603            0 :             PageReconstructError::Cancelled => anyhow::Error::new(self),
     604            0 :             PageReconstructError::WalRedo(e) => e,
     605            0 :             PageReconstructError::MissingKey(_) => anyhow::Error::new(self),
     606              :         }
     607            0 :     }
     608              : }
     609              : 
     610              : impl From<anyhow::Error> for PageReconstructError {
     611            1 :     fn from(value: anyhow::Error) -> Self {
     612              :         // with walingest.rs many PageReconstructError are wrapped in as anyhow::Error
     613            1 :         match value.downcast::<PageReconstructError>() {
     614            0 :             Ok(pre) => pre,
     615            1 :             Err(other) => PageReconstructError::Other(other),
     616              :         }
     617            1 :     }
     618              : }
     619              : 
     620              : impl From<utils::bin_ser::DeserializeError> for PageReconstructError {
     621            0 :     fn from(value: utils::bin_ser::DeserializeError) -> Self {
     622            0 :         PageReconstructError::Other(anyhow::Error::new(value).context("deserialization failure"))
     623            0 :     }
     624              : }
     625              : 
     626              : impl From<layer_manager::Shutdown> for PageReconstructError {
     627            0 :     fn from(_: layer_manager::Shutdown) -> Self {
     628            0 :         PageReconstructError::Cancelled
     629            0 :     }
     630              : }
     631              : 
     632              : impl GetVectoredError {
     633              :     #[cfg(test)]
     634            3 :     pub(crate) fn is_missing_key_error(&self) -> bool {
     635            3 :         matches!(self, Self::MissingKey(_))
     636            3 :     }
     637              : }
     638              : 
     639              : impl From<layer_manager::Shutdown> for GetVectoredError {
     640            0 :     fn from(_: layer_manager::Shutdown) -> Self {
     641            0 :         GetVectoredError::Cancelled
     642            0 :     }
     643              : }
     644              : 
     645              : /// A layer identifier when used in the [`ReadPath`] structure. This enum is for observability purposes
     646              : /// only and not used by the "real read path".
     647              : pub enum ReadPathLayerId {
     648              :     PersistentLayer(PersistentLayerKey),
     649              :     InMemoryLayer(Range<Lsn>),
     650              : }
     651              : 
     652              : impl std::fmt::Display for ReadPathLayerId {
     653            0 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
     654            0 :         match self {
     655            0 :             ReadPathLayerId::PersistentLayer(key) => write!(f, "{key}"),
     656            0 :             ReadPathLayerId::InMemoryLayer(range) => {
     657            0 :                 write!(f, "in-mem {}..{}", range.start, range.end)
     658              :             }
     659              :         }
     660            0 :     }
     661              : }
     662              : pub struct ReadPath {
     663              :     keyspace: KeySpace,
     664              :     lsn: Lsn,
     665              :     path: Vec<(ReadPathLayerId, KeySpace, Range<Lsn>)>,
     666              : }
     667              : 
     668              : impl ReadPath {
     669       312414 :     pub fn new(keyspace: KeySpace, lsn: Lsn) -> Self {
     670       312414 :         Self {
     671       312414 :             keyspace,
     672       312414 :             lsn,
     673       312414 :             path: Vec::new(),
     674       312414 :         }
     675       312414 :     }
     676              : 
     677       446162 :     pub fn record_layer_visit(
     678       446162 :         &mut self,
     679       446162 :         layer_to_read: &ReadableLayer,
     680       446162 :         keyspace_to_read: &KeySpace,
     681       446162 :         lsn_range: &Range<Lsn>,
     682       446162 :     ) {
     683       446162 :         let id = match layer_to_read {
     684       138845 :             ReadableLayer::PersistentLayer(layer) => {
     685       138845 :                 ReadPathLayerId::PersistentLayer(layer.layer_desc().key())
     686              :             }
     687       307317 :             ReadableLayer::InMemoryLayer(layer) => {
     688       307317 :                 ReadPathLayerId::InMemoryLayer(layer.get_lsn_range())
     689              :             }
     690              :         };
     691       446162 :         self.path
     692       446162 :             .push((id, keyspace_to_read.clone(), lsn_range.clone()));
     693       446162 :     }
     694              : }
     695              : 
     696              : impl std::fmt::Display for ReadPath {
     697            0 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
     698            0 :         writeln!(f, "Read path for {} at lsn {}:", self.keyspace, self.lsn)?;
     699            0 :         for (idx, (layer_id, keyspace, lsn_range)) in self.path.iter().enumerate() {
     700            0 :             writeln!(
     701            0 :                 f,
     702            0 :                 "{}: {} {}..{} {}",
     703              :                 idx, layer_id, lsn_range.start, lsn_range.end, keyspace
     704            0 :             )?;
     705              :         }
     706            0 :         Ok(())
     707            0 :     }
     708              : }
     709              : 
     710              : #[derive(thiserror::Error)]
     711              : pub struct MissingKeyError {
     712              :     keyspace: KeySpace,
     713              :     shard: ShardNumber,
     714              :     query: Option<VersionedKeySpaceQuery>,
     715              :     // This is largest request LSN from the get page request batch
     716              :     original_hwm_lsn: Lsn,
     717              :     ancestor_lsn: Option<Lsn>,
     718              :     /// Debug information about the read path if there's an error
     719              :     read_path: Option<ReadPath>,
     720              :     backtrace: Option<std::backtrace::Backtrace>,
     721              : }
     722              : 
     723              : impl MissingKeyError {
     724            7 :     fn enrich(&mut self, query: VersionedKeySpaceQuery) {
     725            7 :         self.query = Some(query);
     726            7 :     }
     727              : }
     728              : 
     729              : impl std::fmt::Debug for MissingKeyError {
     730            0 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
     731            0 :         write!(f, "{self}")
     732            0 :     }
     733              : }
     734              : 
     735              : impl std::fmt::Display for MissingKeyError {
     736            0 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
     737            0 :         write!(
     738            0 :             f,
     739            0 :             "could not find data for key {} (shard {:?}), original HWM LSN {}",
     740              :             self.keyspace, self.shard, self.original_hwm_lsn
     741            0 :         )?;
     742              : 
     743            0 :         if let Some(ref ancestor_lsn) = self.ancestor_lsn {
     744            0 :             write!(f, ", ancestor {ancestor_lsn}")?;
     745            0 :         }
     746              : 
     747            0 :         if let Some(ref query) = self.query {
     748            0 :             write!(f, ", query {query}")?;
     749            0 :         }
     750              : 
     751            0 :         if let Some(ref read_path) = self.read_path {
     752            0 :             write!(f, "\n{read_path}")?;
     753            0 :         }
     754              : 
     755            0 :         if let Some(ref backtrace) = self.backtrace {
     756            0 :             write!(f, "\n{backtrace}")?;
     757            0 :         }
     758              : 
     759            0 :         Ok(())
     760            0 :     }
     761              : }
     762              : 
     763              : #[derive(thiserror::Error, Debug)]
     764              : pub(crate) enum CreateImageLayersError {
     765              :     #[error("timeline shutting down")]
     766              :     Cancelled,
     767              : 
     768              :     #[error("read failed")]
     769              :     GetVectoredError(#[source] GetVectoredError),
     770              : 
     771              :     #[error("reconstruction failed")]
     772              :     PageReconstructError(#[source] PageReconstructError),
     773              : 
     774              :     #[error(transparent)]
     775              :     Other(anyhow::Error),
     776              : }
     777              : 
     778              : impl From<layer_manager::Shutdown> for CreateImageLayersError {
     779            0 :     fn from(_: layer_manager::Shutdown) -> Self {
     780            0 :         CreateImageLayersError::Cancelled
     781            0 :     }
     782              : }
     783              : 
     784              : #[derive(thiserror::Error, Debug, Clone)]
     785              : pub(crate) enum FlushLayerError {
     786              :     /// Timeline cancellation token was cancelled
     787              :     #[error("timeline shutting down")]
     788              :     Cancelled,
     789              : 
     790              :     /// We tried to flush a layer while the Timeline is in an unexpected state
     791              :     #[error("cannot flush frozen layers when flush_loop is not running, state is {0:?}")]
     792              :     NotRunning(FlushLoopState),
     793              : 
     794              :     // Arc<> the following non-clonable error types: we must be Clone-able because the flush error is propagated from the flush
     795              :     // loop via a watch channel, where we can only borrow it.
     796              :     #[error("create image layers (shared)")]
     797              :     CreateImageLayersError(Arc<CreateImageLayersError>),
     798              : 
     799              :     #[error("other (shared)")]
     800              :     Other(#[from] Arc<anyhow::Error>),
     801              : }
     802              : 
     803              : impl FlushLayerError {
     804              :     // When crossing from generic anyhow errors to this error type, we explicitly check
     805              :     // for timeline cancellation to avoid logging inoffensive shutdown errors as warn/err.
     806            0 :     fn from_anyhow(timeline: &Timeline, err: anyhow::Error) -> Self {
     807            0 :         let cancelled = timeline.cancel.is_cancelled()
     808              :             // The upload queue might have been shut down before the official cancellation of the timeline.
     809            0 :             || err
     810            0 :                 .downcast_ref::<NotInitialized>()
     811            0 :                 .map(NotInitialized::is_stopping)
     812            0 :                 .unwrap_or_default();
     813            0 :         if cancelled {
     814            0 :             Self::Cancelled
     815              :         } else {
     816            0 :             Self::Other(Arc::new(err))
     817              :         }
     818            0 :     }
     819              : }
     820              : 
     821              : impl From<layer_manager::Shutdown> for FlushLayerError {
     822            0 :     fn from(_: layer_manager::Shutdown) -> Self {
     823            0 :         FlushLayerError::Cancelled
     824            0 :     }
     825              : }
     826              : 
     827              : #[derive(thiserror::Error, Debug)]
     828              : pub enum GetVectoredError {
     829              :     #[error("timeline shutting down")]
     830              :     Cancelled,
     831              : 
     832              :     #[error("requested too many keys: {0} > {1}")]
     833              :     Oversized(u64, u64),
     834              : 
     835              :     #[error("requested at invalid LSN: {0}")]
     836              :     InvalidLsn(Lsn),
     837              : 
     838              :     #[error("requested key not found: {0}")]
     839              :     MissingKey(Box<MissingKeyError>),
     840              : 
     841              :     #[error("ancestry walk")]
     842              :     GetReadyAncestorError(#[source] GetReadyAncestorError),
     843              : 
     844              :     #[error(transparent)]
     845              :     Other(#[from] anyhow::Error),
     846              : }
     847              : 
     848              : impl From<GetReadyAncestorError> for GetVectoredError {
     849            1 :     fn from(value: GetReadyAncestorError) -> Self {
     850              :         use GetReadyAncestorError::*;
     851            1 :         match value {
     852            0 :             Cancelled => GetVectoredError::Cancelled,
     853              :             AncestorLsnTimeout(_) | BadState { .. } => {
     854            1 :                 GetVectoredError::GetReadyAncestorError(value)
     855              :             }
     856              :         }
     857            1 :     }
     858              : }
     859              : 
     860              : #[derive(thiserror::Error, Debug)]
     861              : pub enum GetReadyAncestorError {
     862              :     #[error("ancestor LSN wait error")]
     863              :     AncestorLsnTimeout(#[from] WaitLsnError),
     864              : 
     865              :     #[error("bad state on timeline {timeline_id}: {state:?}")]
     866              :     BadState {
     867              :         timeline_id: TimelineId,
     868              :         state: TimelineState,
     869              :     },
     870              : 
     871              :     #[error("cancelled")]
     872              :     Cancelled,
     873              : }
     874              : 
     875              : #[derive(Clone, Copy)]
     876              : pub enum LogicalSizeCalculationCause {
     877              :     Initial,
     878              :     ConsumptionMetricsSyntheticSize,
     879              :     EvictionTaskImitation,
     880              :     TenantSizeHandler,
     881              : }
     882              : 
     883              : pub enum GetLogicalSizePriority {
     884              :     User,
     885              :     Background,
     886              : }
     887              : 
     888              : #[derive(Debug, enumset::EnumSetType)]
     889              : pub(crate) enum CompactFlags {
     890              :     ForceRepartition,
     891              :     ForceImageLayerCreation,
     892              :     ForceL0Compaction,
     893              :     OnlyL0Compaction,
     894              :     EnhancedGcBottomMostCompaction,
     895              :     DryRun,
     896              :     /// Makes image compaction yield if there's pending L0 compaction. This should always be used in
     897              :     /// the background compaction task, since we want to aggressively compact down L0 to bound
     898              :     /// read amplification.
     899              :     ///
     900              :     /// It only makes sense to use this when `compaction_l0_first` is enabled (such that we yield to
     901              :     /// an L0 compaction pass), and without `OnlyL0Compaction` (L0 compaction shouldn't yield for L0
     902              :     /// compaction).
     903              :     YieldForL0,
     904              : }
     905              : 
     906              : #[serde_with::serde_as]
     907            0 : #[derive(Debug, Clone, serde::Deserialize)]
     908              : pub(crate) struct CompactRequest {
     909              :     pub compact_key_range: Option<CompactKeyRange>,
     910              :     pub compact_lsn_range: Option<CompactLsnRange>,
     911              :     /// Whether the compaction job should be scheduled.
     912              :     #[serde(default)]
     913              :     pub scheduled: bool,
     914              :     /// Whether the compaction job should be split across key ranges.
     915              :     #[serde(default)]
     916              :     pub sub_compaction: bool,
     917              :     /// Max job size for each subcompaction job.
     918              :     pub sub_compaction_max_job_size_mb: Option<u64>,
     919              : }
     920              : 
     921            0 : #[derive(Debug, Clone, serde::Deserialize)]
     922              : pub(crate) struct MarkInvisibleRequest {
     923              :     #[serde(default)]
     924              :     pub is_visible: Option<bool>,
     925              : }
     926              : 
     927              : #[derive(Debug, Clone, Default)]
     928              : pub(crate) struct CompactOptions {
     929              :     pub flags: EnumSet<CompactFlags>,
     930              :     /// If set, the compaction will only compact the key range specified by this option.
     931              :     /// This option is only used by GC compaction. For the full explanation, see [`compaction::GcCompactJob`].
     932              :     pub compact_key_range: Option<CompactKeyRange>,
     933              :     /// If set, the compaction will only compact the LSN within this value.
     934              :     /// This option is only used by GC compaction. For the full explanation, see [`compaction::GcCompactJob`].
     935              :     pub compact_lsn_range: Option<CompactLsnRange>,
     936              :     /// Enable sub-compaction (split compaction job across key ranges).
     937              :     /// This option is only used by GC compaction.
     938              :     pub sub_compaction: bool,
     939              :     /// Set job size for the GC compaction.
     940              :     /// This option is only used by GC compaction.
     941              :     pub sub_compaction_max_job_size_mb: Option<u64>,
     942              : }
     943              : 
     944              : impl std::fmt::Debug for Timeline {
     945            0 :     fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
     946            0 :         write!(f, "Timeline<{}>", self.timeline_id)
     947            0 :     }
     948              : }
     949              : 
     950              : #[derive(thiserror::Error, Debug, Clone)]
     951              : pub enum WaitLsnError {
     952              :     // Called on a timeline which is shutting down
     953              :     #[error("Shutdown")]
     954              :     Shutdown,
     955              : 
     956              :     // Called on an timeline not in active state or shutting down
     957              :     #[error("Bad timeline state: {0:?}")]
     958              :     BadState(TimelineState),
     959              : 
     960              :     // Timeout expired while waiting for LSN to catch up with goal.
     961              :     #[error("{0}")]
     962              :     Timeout(String),
     963              : }
     964              : 
     965              : impl WaitLsnError {
     966            0 :     pub(crate) fn is_cancel(&self) -> bool {
     967            0 :         match self {
     968            0 :             WaitLsnError::Shutdown => true,
     969            0 :             WaitLsnError::BadState(timeline_state) => match timeline_state {
     970            0 :                 TimelineState::Loading => false,
     971            0 :                 TimelineState::Active => false,
     972            0 :                 TimelineState::Stopping => true,
     973            0 :                 TimelineState::Broken { .. } => false,
     974              :             },
     975            0 :             WaitLsnError::Timeout(_) => false,
     976              :         }
     977            0 :     }
     978            0 :     pub(crate) fn into_anyhow(self) -> anyhow::Error {
     979            0 :         match self {
     980            0 :             WaitLsnError::Shutdown => anyhow::Error::new(self),
     981            0 :             WaitLsnError::BadState(_) => anyhow::Error::new(self),
     982            0 :             WaitLsnError::Timeout(_) => anyhow::Error::new(self),
     983              :         }
     984            0 :     }
     985              : }
     986              : 
     987              : impl From<WaitLsnError> for tonic::Status {
     988            0 :     fn from(err: WaitLsnError) -> Self {
     989              :         use tonic::Code;
     990            0 :         let code = if err.is_cancel() {
     991            0 :             Code::Unavailable
     992              :         } else {
     993            0 :             Code::Internal
     994              :         };
     995            0 :         tonic::Status::new(code, err.to_string())
     996            0 :     }
     997              : }
     998              : 
     999              : // The impls below achieve cancellation mapping for errors.
    1000              : // Perhaps there's a way of achieving this with less cruft.
    1001              : 
    1002              : impl From<CreateImageLayersError> for CompactionError {
    1003            0 :     fn from(e: CreateImageLayersError) -> Self {
    1004            0 :         match e {
    1005            0 :             CreateImageLayersError::Cancelled => CompactionError::new_cancelled(),
    1006            0 :             CreateImageLayersError::Other(e) => {
    1007            0 :                 CompactionError::Other(e.context("create image layers"))
    1008              :             }
    1009            0 :             _ => CompactionError::Other(e.into()),
    1010              :         }
    1011            0 :     }
    1012              : }
    1013              : 
    1014              : impl From<CreateImageLayersError> for FlushLayerError {
    1015            0 :     fn from(e: CreateImageLayersError) -> Self {
    1016            0 :         match e {
    1017            0 :             CreateImageLayersError::Cancelled => FlushLayerError::Cancelled,
    1018            0 :             any => FlushLayerError::CreateImageLayersError(Arc::new(any)),
    1019              :         }
    1020            0 :     }
    1021              : }
    1022              : 
    1023              : impl From<PageReconstructError> for CreateImageLayersError {
    1024            0 :     fn from(e: PageReconstructError) -> Self {
    1025            0 :         match e {
    1026            0 :             PageReconstructError::Cancelled => CreateImageLayersError::Cancelled,
    1027            0 :             _ => CreateImageLayersError::PageReconstructError(e),
    1028              :         }
    1029            0 :     }
    1030              : }
    1031              : 
    1032              : impl From<super::storage_layer::errors::PutError> for CreateImageLayersError {
    1033            0 :     fn from(e: super::storage_layer::errors::PutError) -> Self {
    1034            0 :         if e.is_cancel() {
    1035            0 :             CreateImageLayersError::Cancelled
    1036              :         } else {
    1037            0 :             CreateImageLayersError::Other(e.into_anyhow())
    1038              :         }
    1039            0 :     }
    1040              : }
    1041              : 
    1042              : impl From<GetVectoredError> for CreateImageLayersError {
    1043            0 :     fn from(e: GetVectoredError) -> Self {
    1044            0 :         match e {
    1045            0 :             GetVectoredError::Cancelled => CreateImageLayersError::Cancelled,
    1046            0 :             _ => CreateImageLayersError::GetVectoredError(e),
    1047              :         }
    1048            0 :     }
    1049              : }
    1050              : 
    1051              : impl From<GetVectoredError> for PageReconstructError {
    1052            3 :     fn from(e: GetVectoredError) -> Self {
    1053            3 :         match e {
    1054            0 :             GetVectoredError::Cancelled => PageReconstructError::Cancelled,
    1055            0 :             GetVectoredError::InvalidLsn(_) => PageReconstructError::Other(anyhow!("Invalid LSN")),
    1056            0 :             err @ GetVectoredError::Oversized(_, _) => PageReconstructError::Other(err.into()),
    1057            2 :             GetVectoredError::MissingKey(err) => PageReconstructError::MissingKey(err),
    1058            1 :             GetVectoredError::GetReadyAncestorError(err) => PageReconstructError::from(err),
    1059            0 :             GetVectoredError::Other(err) => PageReconstructError::Other(err),
    1060              :         }
    1061            3 :     }
    1062              : }
    1063              : 
    1064              : impl From<GetReadyAncestorError> for PageReconstructError {
    1065            1 :     fn from(e: GetReadyAncestorError) -> Self {
    1066              :         use GetReadyAncestorError::*;
    1067            1 :         match e {
    1068            0 :             AncestorLsnTimeout(wait_err) => PageReconstructError::AncestorLsnTimeout(wait_err),
    1069            1 :             bad_state @ BadState { .. } => PageReconstructError::Other(anyhow::anyhow!(bad_state)),
    1070            0 :             Cancelled => PageReconstructError::Cancelled,
    1071              :         }
    1072            1 :     }
    1073              : }
    1074              : 
    1075              : pub(crate) enum WaitLsnTimeout {
    1076              :     Custom(Duration),
    1077              :     // Use the [`PageServerConf::wait_lsn_timeout`] default
    1078              :     Default,
    1079              : }
    1080              : 
    1081              : pub(crate) enum WaitLsnWaiter<'a> {
    1082              :     Timeline(&'a Timeline),
    1083              :     Tenant,
    1084              :     PageService,
    1085              :     HttpEndpoint,
    1086              :     BaseBackupCache,
    1087              : }
    1088              : 
    1089              : /// Argument to [`Timeline::shutdown`].
    1090              : #[derive(Debug, Clone, Copy)]
    1091              : pub(crate) enum ShutdownMode {
    1092              :     /// Graceful shutdown, may do a lot of I/O as we flush any open layers to disk. This method can
    1093              :     /// take multiple seconds for a busy timeline.
    1094              :     ///
    1095              :     /// While we are flushing, we continue to accept read I/O for LSNs ingested before
    1096              :     /// the call to [`Timeline::shutdown`].
    1097              :     FreezeAndFlush,
    1098              :     /// Only flush the layers to the remote storage without freezing any open layers. Flush the deletion
    1099              :     /// queue. This is the mode used by ancestor detach and any other operations that reloads a tenant
    1100              :     /// but not increasing the generation number. Note that this mode cannot be used at tenant shutdown,
    1101              :     /// as flushing the deletion queue at that time will cause shutdown-in-progress errors.
    1102              :     Reload,
    1103              :     /// Shut down immediately, without waiting for any open layers to flush.
    1104              :     Hard,
    1105              : }
    1106              : 
    1107              : #[allow(clippy::large_enum_variant, reason = "TODO")]
    1108              : enum ImageLayerCreationOutcome {
    1109              :     /// We generated an image layer
    1110              :     Generated {
    1111              :         unfinished_image_layer: ImageLayerWriter,
    1112              :     },
    1113              :     /// The key range is empty
    1114              :     Empty,
    1115              :     /// (Only used in metadata image layer creation), after reading the metadata keys, we decide to skip
    1116              :     /// the image layer creation.
    1117              :     Skip,
    1118              : }
    1119              : 
    1120              : enum RepartitionError {
    1121              :     Other(anyhow::Error),
    1122              :     CollectKeyspace(CollectKeySpaceError),
    1123              : }
    1124              : 
    1125              : impl RepartitionError {
    1126            0 :     fn is_cancel(&self) -> bool {
    1127            0 :         match self {
    1128            0 :             RepartitionError::Other(_) => false,
    1129            0 :             RepartitionError::CollectKeyspace(e) => e.is_cancel(),
    1130              :         }
    1131            0 :     }
    1132            0 :     fn into_anyhow(self) -> anyhow::Error {
    1133            0 :         match self {
    1134            0 :             RepartitionError::Other(e) => e,
    1135            0 :             RepartitionError::CollectKeyspace(e) => e.into_anyhow(),
    1136              :         }
    1137            0 :     }
    1138              : }
    1139              : 
    1140              : /// Public interface functions
    1141              : impl Timeline {
    1142              :     /// Get the LSN where this branch was created
    1143           80 :     pub(crate) fn get_ancestor_lsn(&self) -> Lsn {
    1144           80 :         self.ancestor_lsn
    1145           80 :     }
    1146              : 
    1147              :     /// Get the ancestor's timeline id
    1148          416 :     pub(crate) fn get_ancestor_timeline_id(&self) -> Option<TimelineId> {
    1149          416 :         self.ancestor_timeline
    1150          416 :             .as_ref()
    1151          416 :             .map(|ancestor| ancestor.timeline_id)
    1152          416 :     }
    1153              : 
    1154              :     /// Get the ancestor timeline
    1155            1 :     pub(crate) fn ancestor_timeline(&self) -> Option<&Arc<Timeline>> {
    1156            1 :         self.ancestor_timeline.as_ref()
    1157            1 :     }
    1158              : 
    1159              :     /// Get the bytes written since the PITR cutoff on this branch, and
    1160              :     /// whether this branch's ancestor_lsn is within its parent's PITR.
    1161            0 :     pub(crate) fn get_pitr_history_stats(&self) -> (u64, bool) {
    1162              :         // TODO: for backwards compatibility, we return the full history back to 0 when the PITR
    1163              :         // cutoff has not yet been initialized. This should return None instead, but this is exposed
    1164              :         // in external HTTP APIs and callers may not handle a null value.
    1165            0 :         let gc_info = self.gc_info.read().unwrap();
    1166            0 :         let history = self
    1167            0 :             .get_last_record_lsn()
    1168            0 :             .checked_sub(gc_info.cutoffs.time.unwrap_or_default())
    1169            0 :             .unwrap_or_default()
    1170            0 :             .0;
    1171            0 :         (history, gc_info.within_ancestor_pitr)
    1172            0 :     }
    1173              : 
    1174              :     /// Read timeline's GC cutoff: this is the LSN at which GC has started to happen
    1175       425931 :     pub(crate) fn get_applied_gc_cutoff_lsn(&self) -> RcuReadGuard<Lsn> {
    1176       425931 :         self.applied_gc_cutoff_lsn.read()
    1177       425931 :     }
    1178              : 
    1179              :     /// Read timeline's planned GC cutoff: this is the logical end of history that users are allowed
    1180              :     /// to read (based on configured PITR), even if physically we have more history. Returns None
    1181              :     /// if the PITR cutoff has not yet been initialized.
    1182            0 :     pub(crate) fn get_gc_cutoff_lsn(&self) -> Option<Lsn> {
    1183            0 :         self.gc_info.read().unwrap().cutoffs.time
    1184            0 :     }
    1185              : 
    1186              :     /// Look up given page version.
    1187              :     ///
    1188              :     /// If a remote layer file is needed, it is downloaded as part of this
    1189              :     /// call.
    1190              :     ///
    1191              :     /// This method enforces [`Self::pagestream_throttle`] internally.
    1192              :     ///
    1193              :     /// NOTE: It is considered an error to 'get' a key that doesn't exist. The
    1194              :     /// abstraction above this needs to store suitable metadata to track what
    1195              :     /// data exists with what keys, in separate metadata entries. If a
    1196              :     /// non-existent key is requested, we may incorrectly return a value from
    1197              :     /// an ancestor branch, for example, or waste a lot of cycles chasing the
    1198              :     /// non-existing key.
    1199              :     ///
    1200              :     /// # Cancel-Safety
    1201              :     ///
    1202              :     /// This method is cancellation-safe.
    1203              :     #[inline(always)]
    1204       301276 :     pub(crate) async fn get(
    1205       301276 :         &self,
    1206       301276 :         key: Key,
    1207       301276 :         lsn: Lsn,
    1208       301276 :         ctx: &RequestContext,
    1209       301276 :     ) -> Result<Bytes, PageReconstructError> {
    1210       301276 :         if !lsn.is_valid() {
    1211            0 :             return Err(PageReconstructError::Other(anyhow::anyhow!("Invalid LSN")));
    1212       301276 :         }
    1213              : 
    1214              :         // This check is debug-only because of the cost of hashing, and because it's a double-check: we
    1215              :         // already checked the key against the shard_identity when looking up the Timeline from
    1216              :         // page_service.
    1217       301276 :         debug_assert!(!self.shard_identity.is_key_disposable(&key));
    1218              : 
    1219       301276 :         let mut reconstruct_state = ValuesReconstructState::new(IoConcurrency::sequential());
    1220              : 
    1221       301276 :         let query = VersionedKeySpaceQuery::uniform(KeySpace::single(key..key.next()), lsn);
    1222              : 
    1223       301276 :         let vectored_res = self
    1224       301276 :             .get_vectored_impl(query, &mut reconstruct_state, ctx)
    1225       301276 :             .await;
    1226              : 
    1227       301276 :         let key_value = vectored_res?.pop_first();
    1228       301273 :         match key_value {
    1229       301267 :             Some((got_key, value)) => {
    1230       301267 :                 if got_key != key {
    1231            0 :                     error!(
    1232            0 :                         "Expected {}, but singular vectored get returned {}",
    1233              :                         key, got_key
    1234              :                     );
    1235            0 :                     Err(PageReconstructError::Other(anyhow!(
    1236            0 :                         "Singular vectored get returned wrong key"
    1237            0 :                     )))
    1238              :                 } else {
    1239       301267 :                     value
    1240              :                 }
    1241              :             }
    1242            6 :             None => Err(PageReconstructError::MissingKey(Box::new(
    1243            6 :                 MissingKeyError {
    1244            6 :                     keyspace: KeySpace::single(key..key.next()),
    1245            6 :                     shard: self.shard_identity.get_shard_number(&key),
    1246            6 :                     original_hwm_lsn: lsn,
    1247            6 :                     ancestor_lsn: None,
    1248            6 :                     backtrace: None,
    1249            6 :                     read_path: None,
    1250            6 :                     query: None,
    1251            6 :                 },
    1252            6 :             ))),
    1253              :         }
    1254       301276 :     }
    1255              : 
    1256              :     #[inline(always)]
    1257            0 :     pub(crate) async fn debug_get(
    1258            0 :         &self,
    1259            0 :         key: Key,
    1260            0 :         lsn: Lsn,
    1261            0 :         ctx: &RequestContext,
    1262            0 :         reconstruct_state: &mut ValuesReconstructState,
    1263            0 :     ) -> Result<Bytes, PageReconstructError> {
    1264            0 :         if !lsn.is_valid() {
    1265            0 :             return Err(PageReconstructError::Other(anyhow::anyhow!("Invalid LSN")));
    1266            0 :         }
    1267              : 
    1268              :         // This check is debug-only because of the cost of hashing, and because it's a double-check: we
    1269              :         // already checked the key against the shard_identity when looking up the Timeline from
    1270              :         // page_service.
    1271            0 :         debug_assert!(!self.shard_identity.is_key_disposable(&key));
    1272              : 
    1273            0 :         let query = VersionedKeySpaceQuery::uniform(KeySpace::single(key..key.next()), lsn);
    1274            0 :         let vectored_res = self
    1275            0 :             .debug_get_vectored_impl(query, reconstruct_state, ctx)
    1276            0 :             .await;
    1277              : 
    1278            0 :         let key_value = vectored_res?.pop_first();
    1279            0 :         match key_value {
    1280            0 :             Some((got_key, value)) => {
    1281            0 :                 if got_key != key {
    1282            0 :                     error!(
    1283            0 :                         "Expected {}, but singular vectored get returned {}",
    1284              :                         key, got_key
    1285              :                     );
    1286            0 :                     Err(PageReconstructError::Other(anyhow!(
    1287            0 :                         "Singular vectored get returned wrong key"
    1288            0 :                     )))
    1289              :                 } else {
    1290            0 :                     value
    1291              :                 }
    1292              :             }
    1293            0 :             None => Err(PageReconstructError::MissingKey(Box::new(
    1294            0 :                 MissingKeyError {
    1295            0 :                     keyspace: KeySpace::single(key..key.next()),
    1296            0 :                     shard: self.shard_identity.get_shard_number(&key),
    1297            0 :                     original_hwm_lsn: lsn,
    1298            0 :                     ancestor_lsn: None,
    1299            0 :                     backtrace: None,
    1300            0 :                     read_path: None,
    1301            0 :                     query: None,
    1302            0 :                 },
    1303            0 :             ))),
    1304              :         }
    1305            0 :     }
    1306              : 
    1307              :     pub(crate) const LAYERS_VISITED_WARN_THRESHOLD: u32 = 100;
    1308              : 
    1309              :     /// Look up multiple page versions at a given LSN
    1310              :     ///
    1311              :     /// This naive implementation will be replaced with a more efficient one
    1312              :     /// which actually vectorizes the read path.
    1313        10894 :     pub(crate) async fn get_vectored(
    1314        10894 :         &self,
    1315        10894 :         query: VersionedKeySpaceQuery,
    1316        10894 :         io_concurrency: super::storage_layer::IoConcurrency,
    1317        10894 :         ctx: &RequestContext,
    1318        10894 :     ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
    1319        10894 :         let total_keyspace = query.total_keyspace();
    1320              : 
    1321        10894 :         let key_count = total_keyspace.total_raw_size();
    1322        10894 :         if key_count > self.conf.max_get_vectored_keys.get() {
    1323            0 :             return Err(GetVectoredError::Oversized(
    1324            0 :                 key_count as u64,
    1325            0 :                 self.conf.max_get_vectored_keys.get() as u64,
    1326            0 :             ));
    1327        10894 :         }
    1328              : 
    1329        34207 :         for range in &total_keyspace.ranges {
    1330        23313 :             let mut key = range.start;
    1331        65342 :             while key != range.end {
    1332        42029 :                 assert!(!self.shard_identity.is_key_disposable(&key));
    1333        42029 :                 key = key.next();
    1334              :             }
    1335              :         }
    1336              : 
    1337        10894 :         trace!(
    1338            0 :             "get vectored query {} from task kind {:?}",
    1339              :             query,
    1340            0 :             ctx.task_kind(),
    1341              :         );
    1342              : 
    1343        10894 :         let start = crate::metrics::GET_VECTORED_LATENCY
    1344        10894 :             .for_task_kind(ctx.task_kind())
    1345        10894 :             .map(|metric| (metric, Instant::now()));
    1346              : 
    1347        10894 :         let res = self
    1348        10894 :             .get_vectored_impl(query, &mut ValuesReconstructState::new(io_concurrency), ctx)
    1349        10894 :             .await;
    1350              : 
    1351        10894 :         if let Some((metric, start)) = start {
    1352            0 :             let elapsed = start.elapsed();
    1353            0 :             metric.observe(elapsed.as_secs_f64());
    1354        10894 :         }
    1355              : 
    1356        10894 :         res
    1357        10894 :     }
    1358              : 
    1359              :     /// Scan the keyspace and return all existing key-values in the keyspace. This currently uses vectored
    1360              :     /// get underlying. Normal vectored get would throw an error when a key in the keyspace is not found
    1361              :     /// during the search, but for the scan interface, it returns all existing key-value pairs, and does
    1362              :     /// not expect each single key in the key space will be found. The semantics is closer to the RocksDB
    1363              :     /// scan iterator interface. We could optimize this interface later to avoid some checks in the vectored
    1364              :     /// get path to maintain and split the probing and to-be-probe keyspace. We also need to ensure that
    1365              :     /// the scan operation will not cause OOM in the future.
    1366            8 :     pub(crate) async fn scan(
    1367            8 :         &self,
    1368            8 :         keyspace: KeySpace,
    1369            8 :         lsn: Lsn,
    1370            8 :         ctx: &RequestContext,
    1371            8 :         io_concurrency: super::storage_layer::IoConcurrency,
    1372            8 :     ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
    1373            8 :         if !lsn.is_valid() {
    1374            0 :             return Err(GetVectoredError::InvalidLsn(lsn));
    1375            8 :         }
    1376              : 
    1377            8 :         trace!(
    1378            0 :             "key-value scan request for {:?}@{} from task kind {:?}",
    1379              :             keyspace,
    1380              :             lsn,
    1381            0 :             ctx.task_kind()
    1382              :         );
    1383              : 
    1384              :         // We should generalize this into Keyspace::contains in the future.
    1385           16 :         for range in &keyspace.ranges {
    1386            8 :             if range.start.field1 < METADATA_KEY_BEGIN_PREFIX
    1387            8 :                 || range.end.field1 > METADATA_KEY_END_PREFIX
    1388              :             {
    1389            0 :                 return Err(GetVectoredError::Other(anyhow::anyhow!(
    1390            0 :                     "only metadata keyspace can be scanned"
    1391            0 :                 )));
    1392            8 :             }
    1393              :         }
    1394              : 
    1395            8 :         let start = crate::metrics::SCAN_LATENCY
    1396            8 :             .for_task_kind(ctx.task_kind())
    1397            8 :             .map(ScanLatencyOngoingRecording::start_recording);
    1398              : 
    1399            8 :         let query = VersionedKeySpaceQuery::uniform(keyspace, lsn);
    1400              : 
    1401            8 :         let vectored_res = self
    1402            8 :             .get_vectored_impl(query, &mut ValuesReconstructState::new(io_concurrency), ctx)
    1403            8 :             .await;
    1404              : 
    1405            8 :         if let Some(recording) = start {
    1406            0 :             recording.observe();
    1407            8 :         }
    1408              : 
    1409            8 :         vectored_res
    1410            8 :     }
    1411              : 
    1412       312414 :     pub(super) async fn get_vectored_impl(
    1413       312414 :         &self,
    1414       312414 :         query: VersionedKeySpaceQuery,
    1415       312414 :         reconstruct_state: &mut ValuesReconstructState,
    1416       312414 :         ctx: &RequestContext,
    1417       312414 :     ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
    1418       312414 :         if query.is_empty() {
    1419            0 :             return Ok(BTreeMap::default());
    1420       312414 :         }
    1421              : 
    1422       312414 :         let read_path = if self.conf.enable_read_path_debugging || ctx.read_path_debug() {
    1423       312414 :             Some(ReadPath::new(
    1424       312414 :                 query.total_keyspace(),
    1425       312414 :                 query.high_watermark_lsn()?,
    1426              :             ))
    1427              :         } else {
    1428            0 :             None
    1429              :         };
    1430              : 
    1431       312414 :         reconstruct_state.read_path = read_path;
    1432              : 
    1433       312414 :         let redo_attempt_type = if ctx.task_kind() == TaskKind::Compaction {
    1434            0 :             RedoAttemptType::LegacyCompaction
    1435              :         } else {
    1436       312414 :             RedoAttemptType::ReadPage
    1437              :         };
    1438              : 
    1439       312414 :         let traversal_res: Result<(), _> = {
    1440       312414 :             let ctx = RequestContextBuilder::from(ctx)
    1441       312414 :                 .perf_span(|crnt_perf_span| {
    1442            0 :                     info_span!(
    1443              :                         target: PERF_TRACE_TARGET,
    1444            0 :                         parent: crnt_perf_span,
    1445              :                         "PLAN_IO",
    1446              :                     )
    1447            0 :                 })
    1448       312414 :                 .attached_child();
    1449              : 
    1450       312414 :             self.get_vectored_reconstruct_data(query.clone(), reconstruct_state, &ctx)
    1451       312414 :                 .maybe_perf_instrument(&ctx, |crnt_perf_span| crnt_perf_span.clone())
    1452       312414 :                 .await
    1453              :         };
    1454              : 
    1455       312414 :         if let Err(err) = traversal_res {
    1456              :             // Wait for all the spawned IOs to complete.
    1457              :             // See comments on `spawn_io` inside `storage_layer` for more details.
    1458            8 :             let mut collect_futs = std::mem::take(&mut reconstruct_state.keys)
    1459            8 :                 .into_values()
    1460            8 :                 .map(|state| state.collect_pending_ios())
    1461            8 :                 .collect::<FuturesUnordered<_>>();
    1462            8 :             while collect_futs.next().await.is_some() {}
    1463              : 
    1464              :             // Enrich the missing key error with the original query.
    1465            8 :             if let GetVectoredError::MissingKey(mut missing_err) = err {
    1466            7 :                 missing_err.enrich(query.clone());
    1467            7 :                 return Err(GetVectoredError::MissingKey(missing_err));
    1468            1 :             }
    1469              : 
    1470            1 :             return Err(err);
    1471       312406 :         };
    1472              : 
    1473       312406 :         let layers_visited = reconstruct_state.get_layers_visited();
    1474              : 
    1475       312406 :         let ctx = RequestContextBuilder::from(ctx)
    1476       312406 :             .perf_span(|crnt_perf_span| {
    1477            0 :                 info_span!(
    1478              :                     target: PERF_TRACE_TARGET,
    1479            0 :                     parent: crnt_perf_span,
    1480              :                     "RECONSTRUCT",
    1481              :                 )
    1482            0 :             })
    1483       312406 :             .attached_child();
    1484              : 
    1485       312406 :         let futs = FuturesUnordered::new();
    1486       363535 :         for (key, state) in std::mem::take(&mut reconstruct_state.keys) {
    1487       363535 :             let req_lsn_for_key = query.map_key_to_lsn(&key);
    1488              : 
    1489       363535 :             futs.push({
    1490       363535 :                 let walredo_self = self.myself.upgrade().expect("&self method holds the arc");
    1491       363535 :                 let ctx = RequestContextBuilder::from(&ctx)
    1492       363535 :                     .perf_span(|crnt_perf_span| {
    1493            0 :                         info_span!(
    1494              :                             target: PERF_TRACE_TARGET,
    1495            0 :                             parent: crnt_perf_span,
    1496              :                             "RECONSTRUCT_KEY",
    1497              :                             key = %key,
    1498              :                         )
    1499            0 :                     })
    1500       363535 :                     .attached_child();
    1501              : 
    1502       363535 :                 async move {
    1503       363535 :                     assert_eq!(state.situation, ValueReconstructSituation::Complete);
    1504              : 
    1505       363535 :                     let res = state
    1506       363535 :                         .collect_pending_ios()
    1507       363535 :                         .maybe_perf_instrument(&ctx, |crnt_perf_span| {
    1508            0 :                             info_span!(
    1509              :                                 target: PERF_TRACE_TARGET,
    1510            0 :                                 parent: crnt_perf_span,
    1511              :                                 "WAIT_FOR_IO_COMPLETIONS",
    1512              :                             )
    1513            0 :                         })
    1514       363535 :                         .await;
    1515              : 
    1516       363535 :                     let converted = match res {
    1517       363535 :                         Ok(ok) => ok,
    1518            0 :                         Err(err) => {
    1519            0 :                             return (key, Err(err));
    1520              :                         }
    1521              :                     };
    1522       363535 :                     DELTAS_PER_READ_GLOBAL.observe(converted.num_deltas() as f64);
    1523              : 
    1524              :                     // The walredo module expects the records to be descending in terms of Lsn.
    1525              :                     // And we submit the IOs in that order, so, there shuold be no need to sort here.
    1526       363535 :                     debug_assert!(
    1527       363535 :                         converted
    1528       363535 :                             .records
    1529      1403254 :                             .is_sorted_by_key(|(lsn, _)| std::cmp::Reverse(*lsn)),
    1530            0 :                         "{converted:?}"
    1531              :                     );
    1532              : 
    1533       363535 :                     let walredo_deltas = converted.num_deltas();
    1534       363535 :                     let walredo_res = walredo_self
    1535       363535 :                         .reconstruct_value(key, req_lsn_for_key, converted, redo_attempt_type)
    1536       363535 :                         .maybe_perf_instrument(&ctx, |crnt_perf_span| {
    1537            0 :                             info_span!(
    1538              :                                 target: PERF_TRACE_TARGET,
    1539            0 :                                 parent: crnt_perf_span,
    1540              :                                 "WALREDO",
    1541              :                                 deltas = %walredo_deltas,
    1542              :                             )
    1543            0 :                         })
    1544       363535 :                         .await;
    1545              : 
    1546       363535 :                     (key, walredo_res)
    1547       363535 :                 }
    1548              :             });
    1549              :         }
    1550              : 
    1551       312406 :         let results = futs
    1552       312406 :             .collect::<BTreeMap<Key, Result<Bytes, PageReconstructError>>>()
    1553       312406 :             .maybe_perf_instrument(&ctx, |crnt_perf_span| crnt_perf_span.clone())
    1554       312406 :             .await;
    1555              : 
    1556              :         // For aux file keys (v1 or v2) the vectored read path does not return an error
    1557              :         // when they're missing. Instead they are omitted from the resulting btree
    1558              :         // (this is a requirement, not a bug). Skip updating the metric in these cases
    1559              :         // to avoid infinite results.
    1560       312406 :         if !results.is_empty() {
    1561       312227 :             if layers_visited >= Self::LAYERS_VISITED_WARN_THRESHOLD {
    1562            0 :                 let total_keyspace = query.total_keyspace();
    1563            0 :                 let max_request_lsn = query.high_watermark_lsn().expect("Validated previously");
    1564              : 
    1565              :                 static LOG_PACER: Lazy<Mutex<RateLimit>> =
    1566            0 :                     Lazy::new(|| Mutex::new(RateLimit::new(Duration::from_secs(60))));
    1567            0 :                 LOG_PACER.lock().unwrap().call(|| {
    1568            0 :                     let num_keys = total_keyspace.total_raw_size();
    1569            0 :                     let num_pages = results.len();
    1570            0 :                     tracing::info!(
    1571            0 :                       shard_id = %self.tenant_shard_id.shard_slug(),
    1572              :                       lsn = %max_request_lsn,
    1573            0 :                       "Vectored read for {total_keyspace} visited {layers_visited} layers. Returned {num_pages}/{num_keys} pages.",
    1574              :                     );
    1575            0 :                 });
    1576       312227 :             }
    1577              : 
    1578              :             // Records the number of layers visited in a few different ways:
    1579              :             //
    1580              :             // * LAYERS_PER_READ: all layers count towards every read in the batch, because each
    1581              :             //   layer directly affects its observed latency.
    1582              :             //
    1583              :             // * LAYERS_PER_READ_BATCH: all layers count towards each batch, to get the per-batch
    1584              :             //   layer visits and access cost.
    1585              :             //
    1586              :             // * LAYERS_PER_READ_AMORTIZED: the average layer count per read, to get the amortized
    1587              :             //   read amplification after batching.
    1588       312227 :             let layers_visited = layers_visited as f64;
    1589       312227 :             let avg_layers_visited = layers_visited / results.len() as f64;
    1590       312227 :             LAYERS_PER_READ_BATCH_GLOBAL.observe(layers_visited);
    1591       675762 :             for _ in &results {
    1592       363535 :                 self.metrics.layers_per_read.observe(layers_visited);
    1593       363535 :                 LAYERS_PER_READ_GLOBAL.observe(layers_visited);
    1594       363535 :                 LAYERS_PER_READ_AMORTIZED_GLOBAL.observe(avg_layers_visited);
    1595       363535 :             }
    1596          179 :         }
    1597              : 
    1598       312406 :         Ok(results)
    1599       312414 :     }
    1600              : 
    1601              :     // A copy of the get_vectored_impl method except that we store the image and wal records into `reconstruct_state`.
    1602              :     // This is only used in the http getpage call for debugging purpose.
    1603            0 :     pub(super) async fn debug_get_vectored_impl(
    1604            0 :         &self,
    1605            0 :         query: VersionedKeySpaceQuery,
    1606            0 :         reconstruct_state: &mut ValuesReconstructState,
    1607            0 :         ctx: &RequestContext,
    1608            0 :     ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
    1609            0 :         if query.is_empty() {
    1610            0 :             return Ok(BTreeMap::default());
    1611            0 :         }
    1612              : 
    1613            0 :         let read_path = if self.conf.enable_read_path_debugging || ctx.read_path_debug() {
    1614            0 :             Some(ReadPath::new(
    1615            0 :                 query.total_keyspace(),
    1616            0 :                 query.high_watermark_lsn()?,
    1617              :             ))
    1618              :         } else {
    1619            0 :             None
    1620              :         };
    1621              : 
    1622            0 :         reconstruct_state.read_path = read_path;
    1623              : 
    1624            0 :         let traversal_res: Result<(), _> = self
    1625            0 :             .get_vectored_reconstruct_data(query.clone(), reconstruct_state, ctx)
    1626            0 :             .await;
    1627              : 
    1628            0 :         if let Err(err) = traversal_res {
    1629              :             // Wait for all the spawned IOs to complete.
    1630              :             // See comments on `spawn_io` inside `storage_layer` for more details.
    1631            0 :             let mut collect_futs = std::mem::take(&mut reconstruct_state.keys)
    1632            0 :                 .into_values()
    1633            0 :                 .map(|state| state.collect_pending_ios())
    1634            0 :                 .collect::<FuturesUnordered<_>>();
    1635            0 :             while collect_futs.next().await.is_some() {}
    1636            0 :             return Err(err);
    1637            0 :         };
    1638              : 
    1639            0 :         let reconstruct_state = Arc::new(Mutex::new(reconstruct_state));
    1640            0 :         let futs = FuturesUnordered::new();
    1641              : 
    1642            0 :         for (key, state) in std::mem::take(&mut reconstruct_state.lock().unwrap().keys) {
    1643            0 :             let req_lsn_for_key = query.map_key_to_lsn(&key);
    1644            0 :             futs.push({
    1645            0 :                 let walredo_self = self.myself.upgrade().expect("&self method holds the arc");
    1646            0 :                 let rc_clone = Arc::clone(&reconstruct_state);
    1647              : 
    1648            0 :                 async move {
    1649            0 :                     assert_eq!(state.situation, ValueReconstructSituation::Complete);
    1650              : 
    1651            0 :                     let converted = match state.collect_pending_ios().await {
    1652            0 :                         Ok(ok) => ok,
    1653            0 :                         Err(err) => {
    1654            0 :                             return (key, Err(err));
    1655              :                         }
    1656              :                     };
    1657            0 :                     DELTAS_PER_READ_GLOBAL.observe(converted.num_deltas() as f64);
    1658              : 
    1659              :                     // The walredo module expects the records to be descending in terms of Lsn.
    1660              :                     // And we submit the IOs in that order, so, there shuold be no need to sort here.
    1661            0 :                     debug_assert!(
    1662            0 :                         converted
    1663            0 :                             .records
    1664            0 :                             .is_sorted_by_key(|(lsn, _)| std::cmp::Reverse(*lsn)),
    1665            0 :                         "{converted:?}"
    1666              :                     );
    1667            0 :                     {
    1668            0 :                         let mut guard = rc_clone.lock().unwrap();
    1669            0 :                         guard.set_debug_state(&converted);
    1670            0 :                     }
    1671              :                     (
    1672            0 :                         key,
    1673            0 :                         walredo_self
    1674            0 :                             .reconstruct_value(
    1675            0 :                                 key,
    1676            0 :                                 req_lsn_for_key,
    1677            0 :                                 converted,
    1678            0 :                                 RedoAttemptType::ReadPage,
    1679            0 :                             )
    1680            0 :                             .await,
    1681              :                     )
    1682            0 :                 }
    1683              :             });
    1684              :         }
    1685              : 
    1686            0 :         let results = futs
    1687            0 :             .collect::<BTreeMap<Key, Result<Bytes, PageReconstructError>>>()
    1688            0 :             .await;
    1689              : 
    1690            0 :         Ok(results)
    1691            0 :     }
    1692              : 
    1693              :     /// Get last or prev record separately. Same as get_last_record_rlsn().last/prev.
    1694       138202 :     pub(crate) fn get_last_record_lsn(&self) -> Lsn {
    1695       138202 :         self.last_record_lsn.load().last
    1696       138202 :     }
    1697              : 
    1698            0 :     pub(crate) fn get_prev_record_lsn(&self) -> Lsn {
    1699            0 :         self.last_record_lsn.load().prev
    1700            0 :     }
    1701              : 
    1702              :     /// Atomically get both last and prev.
    1703          117 :     pub(crate) fn get_last_record_rlsn(&self) -> RecordLsn {
    1704          117 :         self.last_record_lsn.load()
    1705          117 :     }
    1706              : 
    1707              :     /// Subscribe to callers of wait_lsn(). The value of the channel is None if there are no
    1708              :     /// wait_lsn() calls in progress, and Some(Lsn) if there is an active waiter for wait_lsn().
    1709            0 :     pub(crate) fn subscribe_for_wait_lsn_updates(&self) -> watch::Receiver<Option<Lsn>> {
    1710            0 :         self.last_record_lsn.status_receiver()
    1711            0 :     }
    1712              : 
    1713          609 :     pub(crate) fn get_disk_consistent_lsn(&self) -> Lsn {
    1714          609 :         self.disk_consistent_lsn.load()
    1715          609 :     }
    1716              : 
    1717              :     /// remote_consistent_lsn from the perspective of the tenant's current generation,
    1718              :     /// not validated with control plane yet.
    1719              :     /// See [`Self::get_remote_consistent_lsn_visible`].
    1720            2 :     pub(crate) fn get_remote_consistent_lsn_projected(&self) -> Option<Lsn> {
    1721            2 :         self.remote_client.remote_consistent_lsn_projected()
    1722            2 :     }
    1723              : 
    1724              :     /// remote_consistent_lsn which the tenant is guaranteed not to go backward from,
    1725              :     /// i.e. a value of remote_consistent_lsn_projected which has undergone
    1726              :     /// generation validation in the deletion queue.
    1727            0 :     pub(crate) fn get_remote_consistent_lsn_visible(&self) -> Option<Lsn> {
    1728            0 :         self.remote_client.remote_consistent_lsn_visible()
    1729            0 :     }
    1730              : 
    1731              :     /// The sum of the file size of all historic layers in the layer map.
    1732              :     /// This method makes no distinction between local and remote layers.
    1733              :     /// Hence, the result **does not represent local filesystem usage**.
    1734            0 :     pub(crate) async fn layer_size_sum(&self) -> u64 {
    1735            0 :         let guard = self
    1736            0 :             .layers
    1737            0 :             .read(LayerManagerLockHolder::GetLayerMapInfo)
    1738            0 :             .await;
    1739            0 :         guard.layer_size_sum()
    1740            0 :     }
    1741              : 
    1742            0 :     pub(crate) fn resident_physical_size(&self) -> u64 {
    1743            0 :         self.metrics.resident_physical_size_get()
    1744            0 :     }
    1745              : 
    1746            0 :     pub(crate) fn get_directory_metrics(&self) -> [u64; DirectoryKind::KINDS_NUM] {
    1747            0 :         array::from_fn(|idx| self.directory_metrics[idx].load(AtomicOrdering::Relaxed))
    1748            0 :     }
    1749              : 
    1750              :     ///
    1751              :     /// Wait until WAL has been received and processed up to this LSN.
    1752              :     ///
    1753              :     /// You should call this before any of the other get_* or list_* functions. Calling
    1754              :     /// those functions with an LSN that has been processed yet is an error.
    1755              :     ///
    1756       112980 :     pub(crate) async fn wait_lsn(
    1757       112980 :         &self,
    1758       112980 :         lsn: Lsn,
    1759       112980 :         who_is_waiting: WaitLsnWaiter<'_>,
    1760       112980 :         timeout: WaitLsnTimeout,
    1761       112980 :         ctx: &RequestContext, /* Prepare for use by cancellation */
    1762       112980 :     ) -> Result<(), WaitLsnError> {
    1763       112980 :         let state = self.current_state();
    1764       112980 :         if self.cancel.is_cancelled() || matches!(state, TimelineState::Stopping) {
    1765            0 :             return Err(WaitLsnError::Shutdown);
    1766       112980 :         } else if !matches!(state, TimelineState::Active) {
    1767            0 :             return Err(WaitLsnError::BadState(state));
    1768       112980 :         }
    1769              : 
    1770       112980 :         if cfg!(debug_assertions) {
    1771       112980 :             match ctx.task_kind() {
    1772              :                 TaskKind::WalReceiverManager
    1773              :                 | TaskKind::WalReceiverConnectionHandler
    1774              :                 | TaskKind::WalReceiverConnectionPoller => {
    1775            0 :                     let is_myself = match who_is_waiting {
    1776            0 :                         WaitLsnWaiter::Timeline(waiter) => {
    1777            0 :                             Weak::ptr_eq(&waiter.myself, &self.myself)
    1778              :                         }
    1779              :                         WaitLsnWaiter::Tenant
    1780              :                         | WaitLsnWaiter::PageService
    1781              :                         | WaitLsnWaiter::HttpEndpoint
    1782            0 :                         | WaitLsnWaiter::BaseBackupCache => unreachable!(
    1783              :                             "tenant or page_service context are not expected to have task kind {:?}",
    1784            0 :                             ctx.task_kind()
    1785              :                         ),
    1786              :                     };
    1787            0 :                     if is_myself {
    1788            0 :                         if let Err(current) = self.last_record_lsn.would_wait_for(lsn) {
    1789              :                             // walingest is the only one that can advance last_record_lsn; it should make sure to never reach here
    1790            0 :                             panic!(
    1791            0 :                                 "this timeline's walingest task is calling wait_lsn({lsn}) but we only have last_record_lsn={current}; would deadlock"
    1792              :                             );
    1793            0 :                         }
    1794            0 :                     } else {
    1795            0 :                         // if another  timeline's  is waiting for us, there's no deadlock risk because
    1796            0 :                         // our walreceiver task can make progress independent of theirs
    1797            0 :                     }
    1798              :                 }
    1799       112980 :                 _ => {}
    1800              :             }
    1801            0 :         }
    1802              : 
    1803       112980 :         let timeout = match timeout {
    1804            0 :             WaitLsnTimeout::Custom(t) => t,
    1805       112980 :             WaitLsnTimeout::Default => self.conf.wait_lsn_timeout,
    1806              :         };
    1807              : 
    1808       112980 :         let timer = crate::metrics::WAIT_LSN_TIME.start_timer();
    1809       112980 :         let start_finish_counterpair_guard = self.metrics.wait_lsn_start_finish_counterpair.guard();
    1810              : 
    1811       112980 :         let wait_for_timeout = self.last_record_lsn.wait_for_timeout(lsn, timeout);
    1812       112980 :         let wait_for_timeout = std::pin::pin!(wait_for_timeout);
    1813              :         // Use threshold of 1 because even 1 second of wait for ingest is very much abnormal.
    1814       112980 :         let log_slow_threshold = Duration::from_secs(1);
    1815              :         // Use period of 10 to avoid flooding logs during an outage that affects all timelines.
    1816       112980 :         let log_slow_period = Duration::from_secs(10);
    1817       112980 :         let mut logging_permit = None;
    1818       112980 :         let wait_for_timeout = monitor_slow_future(
    1819       112980 :             log_slow_threshold,
    1820       112980 :             log_slow_period,
    1821       112980 :             wait_for_timeout,
    1822              :             |MonitorSlowFutureCallback {
    1823              :                  ready,
    1824              :                  is_slow,
    1825              :                  elapsed_total,
    1826              :                  elapsed_since_last_callback,
    1827       112980 :              }| {
    1828       112980 :                 self.metrics
    1829       112980 :                     .wait_lsn_in_progress_micros
    1830       112980 :                     .inc_by(u64::try_from(elapsed_since_last_callback.as_micros()).unwrap());
    1831       112980 :                 if !is_slow {
    1832       112980 :                     return;
    1833            0 :                 }
    1834              :                 // It's slow, see if we should log it.
    1835              :                 // (We limit the logging to one per invocation per timeline to avoid excessive
    1836              :                 // logging during an extended broker / networking outage that affects all timelines.)
    1837            0 :                 if logging_permit.is_none() {
    1838            0 :                     logging_permit = self.wait_lsn_log_slow.try_acquire().ok();
    1839            0 :                 }
    1840            0 :                 if logging_permit.is_none() {
    1841            0 :                     return;
    1842            0 :                 }
    1843              :                 // We log it.
    1844            0 :                 if ready {
    1845            0 :                     info!(
    1846            0 :                         "slow wait_lsn completed after {:.3}s",
    1847            0 :                         elapsed_total.as_secs_f64()
    1848              :                     );
    1849              :                 } else {
    1850            0 :                     info!(
    1851            0 :                         "slow wait_lsn still running for {:.3}s",
    1852            0 :                         elapsed_total.as_secs_f64()
    1853              :                     );
    1854              :                 }
    1855       112980 :             },
    1856              :         );
    1857       112980 :         let res = wait_for_timeout.await;
    1858              :         // don't count the time spent waiting for lock below, and also in walreceiver.status(), towards the wait_lsn_time_histo
    1859       112980 :         drop(logging_permit);
    1860       112980 :         drop(start_finish_counterpair_guard);
    1861       112980 :         drop(timer);
    1862       112980 :         match res {
    1863       112980 :             Ok(()) => Ok(()),
    1864            0 :             Err(e) => {
    1865              :                 use utils::seqwait::SeqWaitError::*;
    1866            0 :                 match e {
    1867            0 :                     Shutdown => Err(WaitLsnError::Shutdown),
    1868              :                     Timeout => {
    1869            0 :                         let walreceiver_status = self.walreceiver_status();
    1870            0 :                         Err(WaitLsnError::Timeout(format!(
    1871            0 :                             "Timed out while waiting for WAL record at LSN {} to arrive, last_record_lsn {} disk consistent LSN={}, WalReceiver status: {}",
    1872            0 :                             lsn,
    1873            0 :                             self.get_last_record_lsn(),
    1874            0 :                             self.get_disk_consistent_lsn(),
    1875            0 :                             walreceiver_status,
    1876            0 :                         )))
    1877              :                     }
    1878              :                 }
    1879              :             }
    1880              :         }
    1881       112980 :     }
    1882              : 
    1883            0 :     pub(crate) fn walreceiver_status(&self) -> String {
    1884            0 :         match &*self.walreceiver.lock().unwrap() {
    1885            0 :             None => "stopping or stopped".to_string(),
    1886            0 :             Some(walreceiver) => match walreceiver.status() {
    1887            0 :                 Some(status) => status.to_human_readable_string(),
    1888            0 :                 None => "Not active".to_string(),
    1889              :             },
    1890              :         }
    1891            0 :     }
    1892              : 
    1893              :     /// Check that it is valid to request operations with that lsn.
    1894          119 :     pub(crate) fn check_lsn_is_in_scope(
    1895          119 :         &self,
    1896          119 :         lsn: Lsn,
    1897          119 :         latest_gc_cutoff_lsn: &RcuReadGuard<Lsn>,
    1898          119 :     ) -> anyhow::Result<()> {
    1899          119 :         ensure!(
    1900          119 :             lsn >= **latest_gc_cutoff_lsn,
    1901            2 :             "LSN {} is earlier than latest GC cutoff {} (we might've already garbage collected needed data)",
    1902              :             lsn,
    1903            2 :             **latest_gc_cutoff_lsn,
    1904              :         );
    1905          117 :         Ok(())
    1906          119 :     }
    1907              : 
    1908              :     /// Initializes an LSN lease. The function will return an error if the requested LSN is less than the `latest_gc_cutoff_lsn`.
    1909            5 :     pub(crate) fn init_lsn_lease(
    1910            5 :         &self,
    1911            5 :         lsn: Lsn,
    1912            5 :         length: Duration,
    1913            5 :         ctx: &RequestContext,
    1914            5 :     ) -> anyhow::Result<LsnLease> {
    1915            5 :         self.make_lsn_lease(lsn, length, true, ctx)
    1916            5 :     }
    1917              : 
    1918              :     /// Renews a lease at a particular LSN. The requested LSN is not validated against the `latest_gc_cutoff_lsn` when we are in the grace period.
    1919            2 :     pub(crate) fn renew_lsn_lease(
    1920            2 :         &self,
    1921            2 :         lsn: Lsn,
    1922            2 :         length: Duration,
    1923            2 :         ctx: &RequestContext,
    1924            2 :     ) -> anyhow::Result<LsnLease> {
    1925            2 :         self.make_lsn_lease(lsn, length, false, ctx)
    1926            2 :     }
    1927              : 
    1928              :     /// Obtains a temporary lease blocking garbage collection for the given LSN.
    1929              :     ///
    1930              :     /// If we are in `AttachedSingle` mode and is not blocked by the lsn lease deadline, this function will error
    1931              :     /// if the requesting LSN is less than the `latest_gc_cutoff_lsn` and there is no existing request present.
    1932              :     ///
    1933              :     /// If there is an existing lease in the map, the lease will be renewed only if the request extends the lease.
    1934              :     /// The returned lease is therefore the maximum between the existing lease and the requesting lease.
    1935            7 :     fn make_lsn_lease(
    1936            7 :         &self,
    1937            7 :         lsn: Lsn,
    1938            7 :         length: Duration,
    1939            7 :         init: bool,
    1940            7 :         _ctx: &RequestContext,
    1941            7 :     ) -> anyhow::Result<LsnLease> {
    1942            6 :         let lease = {
    1943              :             // Normalize the requested LSN to be aligned, and move to the first record
    1944              :             // if it points to the beginning of the page (header).
    1945            7 :             let lsn = xlog_utils::normalize_lsn(lsn, WAL_SEGMENT_SIZE);
    1946              : 
    1947            7 :             let mut gc_info = self.gc_info.write().unwrap();
    1948            7 :             let planned_cutoff = gc_info.min_cutoff();
    1949              : 
    1950            7 :             let valid_until = SystemTime::now() + length;
    1951              : 
    1952            7 :             let entry = gc_info.leases.entry(lsn);
    1953              : 
    1954            7 :             match entry {
    1955            3 :                 Entry::Occupied(mut occupied) => {
    1956            3 :                     let existing_lease = occupied.get_mut();
    1957            3 :                     if valid_until > existing_lease.valid_until {
    1958            1 :                         existing_lease.valid_until = valid_until;
    1959            1 :                         let dt: DateTime<Utc> = valid_until.into();
    1960            1 :                         info!("lease extended to {}", dt);
    1961              :                     } else {
    1962            2 :                         let dt: DateTime<Utc> = existing_lease.valid_until.into();
    1963            2 :                         info!("existing lease covers greater length, valid until {}", dt);
    1964              :                     }
    1965              : 
    1966            3 :                     existing_lease.clone()
    1967              :                 }
    1968            4 :                 Entry::Vacant(vacant) => {
    1969              :                     // Never allow a lease to be requested for an LSN below the applied GC cutoff. The data could have been deleted.
    1970            4 :                     let latest_gc_cutoff_lsn = self.get_applied_gc_cutoff_lsn();
    1971            4 :                     if lsn < *latest_gc_cutoff_lsn {
    1972            1 :                         bail!(
    1973            1 :                             "tried to request an lsn lease for an lsn below the latest gc cutoff. requested at {} gc cutoff {}",
    1974              :                             lsn,
    1975            1 :                             *latest_gc_cutoff_lsn
    1976              :                         );
    1977            3 :                     }
    1978              : 
    1979              :                     // We allow create lease for those below the planned gc cutoff if we are still within the grace period
    1980              :                     // of GC blocking.
    1981            3 :                     let validate = {
    1982            3 :                         let conf = self.tenant_conf.load();
    1983            3 :                         !conf.is_gc_blocked_by_lsn_lease_deadline()
    1984              :                     };
    1985              : 
    1986              :                     // Do not allow initial lease creation to be below the planned gc cutoff. The client (compute_ctl) determines
    1987              :                     // whether it is a initial lease creation or a renewal.
    1988            3 :                     if (init || validate) && lsn < planned_cutoff {
    1989            0 :                         bail!(
    1990            0 :                             "tried to request an lsn lease for an lsn below the planned gc cutoff. requested at {} planned gc cutoff {}",
    1991              :                             lsn,
    1992              :                             planned_cutoff
    1993              :                         );
    1994            3 :                     }
    1995              : 
    1996            3 :                     let dt: DateTime<Utc> = valid_until.into();
    1997            3 :                     info!("lease created, valid until {}", dt);
    1998            3 :                     vacant.insert(LsnLease { valid_until }).clone()
    1999              :                 }
    2000              :             }
    2001              :         };
    2002              : 
    2003            6 :         Ok(lease)
    2004            7 :     }
    2005              : 
    2006              :     /// Freeze the current open in-memory layer. It will be written to disk on next iteration.
    2007              :     /// Returns the flush request ID which can be awaited with wait_flush_completion().
    2008              :     #[instrument(skip(self), fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id))]
    2009              :     pub(crate) async fn freeze(&self) -> Result<u64, FlushLayerError> {
    2010              :         self.freeze0().await
    2011              :     }
    2012              : 
    2013              :     /// Freeze and flush the open in-memory layer, waiting for it to be written to disk.
    2014              :     #[instrument(skip(self), fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id))]
    2015              :     pub(crate) async fn freeze_and_flush(&self) -> Result<(), FlushLayerError> {
    2016              :         self.freeze_and_flush0().await
    2017              :     }
    2018              : 
    2019              :     /// Freeze the current open in-memory layer. It will be written to disk on next iteration.
    2020              :     /// Returns the flush request ID which can be awaited with wait_flush_completion().
    2021          571 :     pub(crate) async fn freeze0(&self) -> Result<u64, FlushLayerError> {
    2022          571 :         let mut g = self.write_lock.lock().await;
    2023          571 :         let to_lsn = self.get_last_record_lsn();
    2024          571 :         self.freeze_inmem_layer_at(to_lsn, &mut g).await
    2025          571 :     }
    2026              : 
    2027              :     // This exists to provide a non-span creating version of `freeze_and_flush` we can call without
    2028              :     // polluting the span hierarchy.
    2029          571 :     pub(crate) async fn freeze_and_flush0(&self) -> Result<(), FlushLayerError> {
    2030          571 :         let token = self.freeze0().await?;
    2031          571 :         self.wait_flush_completion(token).await
    2032          571 :     }
    2033              : 
    2034              :     // Check if an open ephemeral layer should be closed: this provides
    2035              :     // background enforcement of checkpoint interval if there is no active WAL receiver, to avoid keeping
    2036              :     // an ephemeral layer open forever when idle.  It also freezes layers if the global limit on
    2037              :     // ephemeral layer bytes has been breached.
    2038            0 :     pub(super) async fn maybe_freeze_ephemeral_layer(&self) {
    2039            0 :         debug_assert_current_span_has_tenant_and_timeline_id();
    2040              : 
    2041            0 :         let Ok(mut write_guard) = self.write_lock.try_lock() else {
    2042              :             // If the write lock is held, there is an active wal receiver: rolling open layers
    2043              :             // is their responsibility while they hold this lock.
    2044            0 :             return;
    2045              :         };
    2046              : 
    2047              :         // FIXME: why not early exit? because before #7927 the state would had been cleared every
    2048              :         // time, and this was missed.
    2049              :         // if write_guard.is_none() { return; }
    2050              : 
    2051            0 :         let Ok(layers_guard) = self.layers.try_read(LayerManagerLockHolder::TryFreezeLayer) else {
    2052              :             // Don't block if the layer lock is busy
    2053            0 :             return;
    2054              :         };
    2055              : 
    2056            0 :         let Ok(lm) = layers_guard.layer_map() else {
    2057            0 :             return;
    2058              :         };
    2059              : 
    2060            0 :         let Some(open_layer) = &lm.open_layer else {
    2061              :             // If there is no open layer, we have no layer freezing to do.  However, we might need to generate
    2062              :             // some updates to disk_consistent_lsn and remote_consistent_lsn, in case we ingested some WAL regions
    2063              :             // that didn't result in writes to this shard.
    2064              : 
    2065              :             // Must not hold the layers lock while waiting for a flush.
    2066            0 :             drop(layers_guard);
    2067              : 
    2068            0 :             let last_record_lsn = self.get_last_record_lsn();
    2069            0 :             let disk_consistent_lsn = self.get_disk_consistent_lsn();
    2070            0 :             if last_record_lsn > disk_consistent_lsn {
    2071              :                 // We have no open layer, but disk_consistent_lsn is behind the last record: this indicates
    2072              :                 // we are a sharded tenant and have skipped some WAL
    2073            0 :                 let last_freeze_ts = *self.last_freeze_ts.read().unwrap();
    2074            0 :                 if last_freeze_ts.elapsed() >= self.get_checkpoint_timeout() {
    2075              :                     // Only do this if have been layer-less longer than get_checkpoint_timeout, so that a shard
    2076              :                     // without any data ingested (yet) doesn't write a remote index as soon as it
    2077              :                     // sees its LSN advance: we only do this if we've been layer-less
    2078              :                     // for some time.
    2079            0 :                     tracing::debug!(
    2080            0 :                         "Advancing disk_consistent_lsn past WAL ingest gap {} -> {}",
    2081              :                         disk_consistent_lsn,
    2082              :                         last_record_lsn
    2083              :                     );
    2084              : 
    2085              :                     // The flush loop will update remote consistent LSN as well as disk consistent LSN.
    2086              :                     // We know there is no open layer, so we can request freezing without actually
    2087              :                     // freezing anything. This is true even if we have dropped the layers_guard, we
    2088              :                     // still hold the write_guard.
    2089            0 :                     let _ = async {
    2090            0 :                         let token = self
    2091            0 :                             .freeze_inmem_layer_at(last_record_lsn, &mut write_guard)
    2092            0 :                             .await?;
    2093            0 :                         self.wait_flush_completion(token).await
    2094            0 :                     }
    2095            0 :                     .await;
    2096            0 :                 }
    2097            0 :             }
    2098              : 
    2099            0 :             return;
    2100              :         };
    2101              : 
    2102            0 :         let current_size = open_layer.len();
    2103              : 
    2104            0 :         let current_lsn = self.get_last_record_lsn();
    2105              : 
    2106            0 :         let checkpoint_distance_override = open_layer.tick();
    2107              : 
    2108            0 :         if let Some(size_override) = checkpoint_distance_override {
    2109            0 :             if current_size > size_override {
    2110              :                 // This is not harmful, but it only happens in relatively rare cases where
    2111              :                 // time-based checkpoints are not happening fast enough to keep the amount of
    2112              :                 // ephemeral data within configured limits.  It's a sign of stress on the system.
    2113            0 :                 tracing::info!(
    2114            0 :                     "Early-rolling open layer at size {current_size} (limit {size_override}) due to dirty data pressure"
    2115              :                 );
    2116            0 :             }
    2117            0 :         }
    2118              : 
    2119            0 :         let checkpoint_distance =
    2120            0 :             checkpoint_distance_override.unwrap_or(self.get_checkpoint_distance());
    2121              : 
    2122            0 :         if self.should_roll(
    2123            0 :             current_size,
    2124            0 :             current_size,
    2125            0 :             checkpoint_distance,
    2126            0 :             self.get_last_record_lsn(),
    2127            0 :             self.last_freeze_at.load(),
    2128            0 :             open_layer.get_opened_at(),
    2129              :         ) {
    2130            0 :             match open_layer.info() {
    2131            0 :                 InMemoryLayerInfo::Frozen { lsn_start, lsn_end } => {
    2132              :                     // We may reach this point if the layer was already frozen by not yet flushed: flushing
    2133              :                     // happens asynchronously in the background.
    2134            0 :                     tracing::debug!(
    2135            0 :                         "Not freezing open layer, it's already frozen ({lsn_start}..{lsn_end})"
    2136              :                     );
    2137              :                 }
    2138              :                 InMemoryLayerInfo::Open { .. } => {
    2139              :                     // Upgrade to a write lock and freeze the layer
    2140            0 :                     drop(layers_guard);
    2141            0 :                     let res = self
    2142            0 :                         .freeze_inmem_layer_at(current_lsn, &mut write_guard)
    2143            0 :                         .await;
    2144              : 
    2145            0 :                     if let Err(e) = res {
    2146            0 :                         tracing::info!(
    2147            0 :                             "failed to flush frozen layer after background freeze: {e:#}"
    2148              :                         );
    2149            0 :                     }
    2150              :                 }
    2151              :             }
    2152            0 :         }
    2153            0 :     }
    2154              : 
    2155              :     /// Checks if the internal state of the timeline is consistent with it being able to be offloaded.
    2156              :     ///
    2157              :     /// This is neccessary but not sufficient for offloading of the timeline as it might have
    2158              :     /// child timelines that are not offloaded yet.
    2159            0 :     pub(crate) fn can_offload(&self) -> (bool, &'static str) {
    2160            0 :         if self.remote_client.is_archived() != Some(true) {
    2161            0 :             return (false, "the timeline is not archived");
    2162            0 :         }
    2163            0 :         if !self.remote_client.no_pending_work() {
    2164              :             // if the remote client is still processing some work, we can't offload
    2165            0 :             return (false, "the upload queue is not drained yet");
    2166            0 :         }
    2167              : 
    2168            0 :         (true, "ok")
    2169            0 :     }
    2170              : 
    2171              :     /// Outermost timeline compaction operation; downloads needed layers. Returns whether we have pending
    2172              :     /// compaction tasks.
    2173          192 :     pub(crate) async fn compact(
    2174          192 :         self: &Arc<Self>,
    2175          192 :         cancel: &CancellationToken,
    2176          192 :         flags: EnumSet<CompactFlags>,
    2177          192 :         ctx: &RequestContext,
    2178          192 :     ) -> Result<CompactionOutcome, CompactionError> {
    2179          192 :         let res = self
    2180          192 :             .compact_with_options(
    2181          192 :                 cancel,
    2182          192 :                 CompactOptions {
    2183          192 :                     flags,
    2184          192 :                     compact_key_range: None,
    2185          192 :                     compact_lsn_range: None,
    2186          192 :                     sub_compaction: false,
    2187          192 :                     sub_compaction_max_job_size_mb: None,
    2188          192 :                 },
    2189          192 :                 ctx,
    2190          192 :             )
    2191          192 :             .await;
    2192          192 :         if let Err(err) = &res {
    2193            0 :             log_compaction_error(err, None, cancel.is_cancelled(), false);
    2194          192 :         }
    2195          192 :         res
    2196          192 :     }
    2197              : 
    2198              :     /// Outermost timeline compaction operation; downloads needed layers.
    2199              :     ///
    2200              :     /// NB: the cancellation token is usually from a background task, but can also come from a
    2201              :     /// request task.
    2202          192 :     pub(crate) async fn compact_with_options(
    2203          192 :         self: &Arc<Self>,
    2204          192 :         cancel: &CancellationToken,
    2205          192 :         options: CompactOptions,
    2206          192 :         ctx: &RequestContext,
    2207          192 :     ) -> Result<CompactionOutcome, CompactionError> {
    2208              :         // Acquire the compaction lock and task semaphore.
    2209              :         //
    2210              :         // L0-only compaction uses a separate semaphore (if enabled) to make sure it isn't starved
    2211              :         // out by other background tasks (including image compaction). We request this via
    2212              :         // `BackgroundLoopKind::L0Compaction`.
    2213              :         //
    2214              :         // Yield for pending L0 compaction while waiting for the semaphore.
    2215          192 :         let is_l0_only = options.flags.contains(CompactFlags::OnlyL0Compaction);
    2216          192 :         let semaphore_kind = match is_l0_only && self.get_compaction_l0_semaphore() {
    2217            0 :             true => BackgroundLoopKind::L0Compaction,
    2218          192 :             false => BackgroundLoopKind::Compaction,
    2219              :         };
    2220          192 :         let yield_for_l0 = options.flags.contains(CompactFlags::YieldForL0);
    2221          192 :         if yield_for_l0 {
    2222              :             // If this is an L0 pass, it doesn't make sense to yield for L0.
    2223            0 :             debug_assert!(!is_l0_only, "YieldForL0 during L0 pass");
    2224              :             // If `compaction_l0_first` is disabled, there's no point yielding.
    2225            0 :             debug_assert!(self.get_compaction_l0_first(), "YieldForL0 without L0 pass");
    2226          192 :         }
    2227              : 
    2228          192 :         let acquire = async move {
    2229          192 :             let guard = self.compaction_lock.lock().await;
    2230          192 :             let permit = super::tasks::acquire_concurrency_permit(semaphore_kind, ctx).await;
    2231          192 :             (guard, permit)
    2232          192 :         };
    2233              : 
    2234          192 :         let (_guard, _permit) = tokio::select! {
    2235          192 :             (guard, permit) = acquire => (guard, permit),
    2236          192 :             _ = self.l0_compaction_trigger.notified(), if yield_for_l0 => {
    2237            0 :                 return Ok(CompactionOutcome::YieldForL0);
    2238              :             }
    2239          192 :             _ = self.cancel.cancelled() => return Ok(CompactionOutcome::Skipped),
    2240          192 :             _ = cancel.cancelled() => return Ok(CompactionOutcome::Skipped),
    2241              :         };
    2242              : 
    2243          192 :         let last_record_lsn = self.get_last_record_lsn();
    2244              : 
    2245              :         // Last record Lsn could be zero in case the timeline was just created
    2246          192 :         if !last_record_lsn.is_valid() {
    2247            0 :             warn!(
    2248            0 :                 "Skipping compaction for potentially just initialized timeline, it has invalid last record lsn: {last_record_lsn}"
    2249              :             );
    2250            0 :             return Ok(CompactionOutcome::Skipped);
    2251          192 :         }
    2252              : 
    2253          192 :         let result = match self.get_compaction_algorithm_settings().kind {
    2254              :             CompactionAlgorithm::Tiered => {
    2255            0 :                 self.compact_tiered(cancel, ctx).await?;
    2256            0 :                 Ok(CompactionOutcome::Done)
    2257              :             }
    2258          192 :             CompactionAlgorithm::Legacy => self.compact_legacy(cancel, options, ctx).await,
    2259              :         };
    2260              : 
    2261              :         // Signal compaction failure to avoid L0 flush stalls when it's broken.
    2262            0 :         match &result {
    2263          192 :             Ok(_) => self.compaction_failed.store(false, AtomicOrdering::Relaxed),
    2264            0 :             Err(e) if e.is_cancel() => {}
    2265            0 :             Err(_) => self.compaction_failed.store(true, AtomicOrdering::Relaxed),
    2266              :         };
    2267              : 
    2268          192 :         result
    2269          192 :     }
    2270              : 
    2271              :     /// Mutate the timeline with a [`TimelineWriter`].
    2272      2566609 :     pub(crate) async fn writer(&self) -> TimelineWriter<'_> {
    2273              :         TimelineWriter {
    2274      2566609 :             tl: self,
    2275      2566609 :             write_guard: self.write_lock.lock().await,
    2276              :         }
    2277      2566609 :     }
    2278              : 
    2279            0 :     pub(crate) fn activate(
    2280            0 :         self: &Arc<Self>,
    2281            0 :         parent: Arc<crate::tenant::TenantShard>,
    2282            0 :         broker_client: BrokerClientChannel,
    2283            0 :         background_jobs_can_start: Option<&completion::Barrier>,
    2284            0 :         ctx: &RequestContext,
    2285            0 :     ) {
    2286            0 :         if self.tenant_shard_id.is_shard_zero() {
    2287            0 :             // Logical size is only maintained accurately on shard zero.
    2288            0 :             self.spawn_initial_logical_size_computation_task(ctx);
    2289            0 :         }
    2290            0 :         self.launch_wal_receiver(ctx, broker_client);
    2291            0 :         self.set_state(TimelineState::Active);
    2292            0 :         self.launch_eviction_task(parent, background_jobs_can_start);
    2293            0 :     }
    2294              : 
    2295              :     /// After this function returns, there are no timeline-scoped tasks are left running.
    2296              :     ///
    2297              :     /// The preferred pattern for is:
    2298              :     /// - in any spawned tasks, keep Timeline::guard open + Timeline::cancel / child token
    2299              :     /// - if early shutdown (not just cancellation) of a sub-tree of tasks is required,
    2300              :     ///   go the extra mile and keep track of JoinHandles
    2301              :     /// - Keep track of JoinHandles using a passed-down `Arc<Mutex<Option<JoinSet>>>` or similar,
    2302              :     ///   instead of spawning directly on a runtime. It is a more composable / testable pattern.
    2303              :     ///
    2304              :     /// For legacy reasons, we still have multiple tasks spawned using
    2305              :     /// `task_mgr::spawn(X, Some(tenant_id), Some(timeline_id))`.
    2306              :     /// We refer to these as "timeline-scoped task_mgr tasks".
    2307              :     /// Some of these tasks are already sensitive to Timeline::cancel while others are
    2308              :     /// not sensitive to Timeline::cancel and instead respect [`task_mgr::shutdown_token`]
    2309              :     /// or [`task_mgr::shutdown_watcher`].
    2310              :     /// We want to gradually convert the code base away from these.
    2311              :     ///
    2312              :     /// Here is an inventory of timeline-scoped task_mgr tasks that are still sensitive to
    2313              :     /// `task_mgr::shutdown_{token,watcher}` (there are also tenant-scoped and global-scoped
    2314              :     /// ones that aren't mentioned here):
    2315              :     /// - [`TaskKind::TimelineDeletionWorker`]
    2316              :     ///    - NB: also used for tenant deletion
    2317              :     /// - [`TaskKind::RemoteUploadTask`]`
    2318              :     /// - [`TaskKind::InitialLogicalSizeCalculation`]
    2319              :     /// - [`TaskKind::DownloadAllRemoteLayers`] (can we get rid of it?)
    2320              :     // Inventory of timeline-scoped task_mgr tasks that use spawn but aren't sensitive:
    2321              :     /// - [`TaskKind::Eviction`]
    2322              :     /// - [`TaskKind::LayerFlushTask`]
    2323              :     /// - [`TaskKind::OndemandLogicalSizeCalculation`]
    2324              :     /// - [`TaskKind::GarbageCollector`] (immediate_gc is timeline-scoped)
    2325            5 :     pub(crate) async fn shutdown(&self, mode: ShutdownMode) {
    2326            5 :         debug_assert_current_span_has_tenant_and_timeline_id();
    2327              : 
    2328              :         // Regardless of whether we're going to try_freeze_and_flush
    2329              :         // cancel walreceiver to stop ingesting more data asap.
    2330              :         //
    2331              :         // Note that we're accepting a race condition here where we may
    2332              :         // do the final flush below, before walreceiver observes the
    2333              :         // cancellation and exits.
    2334              :         // This means we may open a new InMemoryLayer after the final flush below.
    2335              :         // Flush loop is also still running for a short while, so, in theory, it
    2336              :         // could also make its way into the upload queue.
    2337              :         //
    2338              :         // If we wait for the shutdown of the walreceiver before moving on to the
    2339              :         // flush, then that would be avoided. But we don't do it because the
    2340              :         // walreceiver entertains reads internally, which means that it possibly
    2341              :         // depends on the download of layers. Layer download is only sensitive to
    2342              :         // the cancellation of the entire timeline, so cancelling the walreceiver
    2343              :         // will have no effect on the individual get requests.
    2344              :         // This would cause problems when there is a lot of ongoing downloads or
    2345              :         // there is S3 unavailabilities, i.e. detach, deletion, etc would hang,
    2346              :         // and we can't deallocate resources of the timeline, etc.
    2347            5 :         let walreceiver = self.walreceiver.lock().unwrap().take();
    2348            5 :         tracing::debug!(
    2349            0 :             is_some = walreceiver.is_some(),
    2350            0 :             "Waiting for WalReceiverManager..."
    2351              :         );
    2352            5 :         if let Some(walreceiver) = walreceiver {
    2353            0 :             walreceiver.cancel().await;
    2354            5 :         }
    2355              :         // ... and inform any waiters for newer LSNs that there won't be any.
    2356            5 :         self.last_record_lsn.shutdown();
    2357              : 
    2358            5 :         if let ShutdownMode::FreezeAndFlush = mode {
    2359            3 :             let do_flush = if let Some((open, frozen)) = self
    2360            3 :                 .layers
    2361            3 :                 .read(LayerManagerLockHolder::Shutdown)
    2362            3 :                 .await
    2363            3 :                 .layer_map()
    2364            3 :                 .map(|lm| (lm.open_layer.is_some(), lm.frozen_layers.len()))
    2365            3 :                 .ok()
    2366            3 :                 .filter(|(open, frozen)| *open || *frozen > 0)
    2367              :             {
    2368            0 :                 if self.remote_client.is_archived() == Some(true) {
    2369              :                     // No point flushing on shutdown for an archived timeline: it is not important
    2370              :                     // to have it nice and fresh after our restart, and trying to flush here might
    2371              :                     // race with trying to offload it (which also stops the flush loop)
    2372            0 :                     false
    2373              :                 } else {
    2374            0 :                     tracing::info!(?open, frozen, "flushing and freezing on shutdown");
    2375            0 :                     true
    2376              :                 }
    2377              :             } else {
    2378              :                 // this is double-shutdown, it'll be a no-op
    2379            3 :                 true
    2380              :             };
    2381              : 
    2382              :             // we shut down walreceiver above, so, we won't add anything more
    2383              :             // to the InMemoryLayer; freeze it and wait for all frozen layers
    2384              :             // to reach the disk & upload queue, then shut the upload queue and
    2385              :             // wait for it to drain.
    2386            3 :             if do_flush {
    2387            3 :                 match self.freeze_and_flush().await {
    2388              :                     Ok(_) => {
    2389              :                         // drain the upload queue
    2390              :                         // if we did not wait for completion here, it might be our shutdown process
    2391              :                         // didn't wait for remote uploads to complete at all, as new tasks can forever
    2392              :                         // be spawned.
    2393              :                         //
    2394              :                         // what is problematic is the shutting down of RemoteTimelineClient, because
    2395              :                         // obviously it does not make sense to stop while we wait for it, but what
    2396              :                         // about corner cases like s3 suddenly hanging up?
    2397            3 :                         self.remote_client.shutdown().await;
    2398              :                     }
    2399              :                     Err(FlushLayerError::Cancelled) => {
    2400              :                         // this is likely the second shutdown, ignore silently.
    2401              :                         // TODO: this can be removed once https://github.com/neondatabase/neon/issues/5080
    2402            0 :                         debug_assert!(self.cancel.is_cancelled());
    2403              :                     }
    2404            0 :                     Err(e) => {
    2405              :                         // Non-fatal.  Shutdown is infallible.  Failures to flush just mean that
    2406              :                         // we have some extra WAL replay to do next time the timeline starts.
    2407            0 :                         warn!("failed to freeze and flush: {e:#}");
    2408              :                     }
    2409              :                 }
    2410              : 
    2411              :                 // `self.remote_client.shutdown().await` above should have already flushed everything from the queue, but
    2412              :                 // we also do a final check here to ensure that the queue is empty.
    2413            3 :                 if !self.remote_client.no_pending_work() {
    2414            0 :                     warn!(
    2415            0 :                         "still have pending work in remote upload queue, but continuing shutting down anyways"
    2416              :                     );
    2417            3 :                 }
    2418            0 :             }
    2419            2 :         }
    2420              : 
    2421            5 :         if let ShutdownMode::Reload = mode {
    2422              :             // drain the upload queue
    2423            1 :             self.remote_client.shutdown().await;
    2424            1 :             if !self.remote_client.no_pending_work() {
    2425            0 :                 warn!(
    2426            0 :                     "still have pending work in remote upload queue, but continuing shutting down anyways"
    2427              :                 );
    2428            1 :             }
    2429            4 :         }
    2430              : 
    2431              :         // Signal any subscribers to our cancellation token to drop out
    2432            5 :         tracing::debug!("Cancelling CancellationToken");
    2433            5 :         self.cancel.cancel();
    2434              : 
    2435              :         // If we have a background task downloading heatmap layers stop it.
    2436              :         // The background downloads are sensitive to timeline cancellation (done above),
    2437              :         // so the drain will be immediate.
    2438            5 :         self.stop_and_drain_heatmap_layers_download().await;
    2439              : 
    2440              :         // Ensure Prevent new page service requests from starting.
    2441            5 :         self.handles.shutdown();
    2442              : 
    2443              :         // Transition the remote_client into a state where it's only useful for timeline deletion.
    2444              :         // (The deletion use case is why we can't just hook up remote_client to Self::cancel).)
    2445            5 :         self.remote_client.stop();
    2446              : 
    2447              :         // As documented in remote_client.stop()'s doc comment, it's our responsibility
    2448              :         // to shut down the upload queue tasks.
    2449              :         // TODO: fix that, task management should be encapsulated inside remote_client.
    2450            5 :         task_mgr::shutdown_tasks(
    2451            5 :             Some(TaskKind::RemoteUploadTask),
    2452            5 :             Some(self.tenant_shard_id),
    2453            5 :             Some(self.timeline_id),
    2454            5 :         )
    2455            5 :         .await;
    2456              : 
    2457              :         // TODO: work toward making this a no-op. See this function's doc comment for more context.
    2458            5 :         tracing::debug!("Waiting for tasks...");
    2459            5 :         task_mgr::shutdown_tasks(None, Some(self.tenant_shard_id), Some(self.timeline_id)).await;
    2460              : 
    2461              :         {
    2462              :             // Allow any remaining in-memory layers to do cleanup -- until that, they hold the gate
    2463              :             // open.
    2464            5 :             let mut write_guard = self.write_lock.lock().await;
    2465            5 :             self.layers
    2466            5 :                 .write(LayerManagerLockHolder::Shutdown)
    2467            5 :                 .await
    2468            5 :                 .shutdown(&mut write_guard);
    2469              :         }
    2470              : 
    2471              :         // Finally wait until any gate-holders are complete.
    2472              :         //
    2473              :         // TODO: once above shutdown_tasks is a no-op, we can close the gate before calling shutdown_tasks
    2474              :         // and use a TBD variant of shutdown_tasks that asserts that there were no tasks left.
    2475            5 :         self.gate.close().await;
    2476              : 
    2477            5 :         self.metrics.shutdown();
    2478            5 :     }
    2479              : 
    2480          236 :     pub(crate) fn set_state(&self, new_state: TimelineState) {
    2481          236 :         match (self.current_state(), new_state) {
    2482          236 :             (equal_state_1, equal_state_2) if equal_state_1 == equal_state_2 => {
    2483            1 :                 info!("Ignoring new state, equal to the existing one: {equal_state_2:?}");
    2484              :             }
    2485            0 :             (st, TimelineState::Loading) => {
    2486            0 :                 error!("ignoring transition from {st:?} into Loading state");
    2487              :             }
    2488            0 :             (TimelineState::Broken { .. }, new_state) => {
    2489            0 :                 error!("Ignoring state update {new_state:?} for broken timeline");
    2490              :             }
    2491              :             (TimelineState::Stopping, TimelineState::Active) => {
    2492            0 :                 error!("Not activating a Stopping timeline");
    2493              :             }
    2494          235 :             (_, new_state) => {
    2495          235 :                 self.state.send_replace(new_state);
    2496          235 :             }
    2497              :         }
    2498          236 :     }
    2499              : 
    2500            1 :     pub(crate) fn set_broken(&self, reason: String) {
    2501            1 :         let backtrace_str: String = format!("{}", std::backtrace::Backtrace::force_capture());
    2502            1 :         let broken_state = TimelineState::Broken {
    2503            1 :             reason,
    2504            1 :             backtrace: backtrace_str,
    2505            1 :         };
    2506            1 :         self.set_state(broken_state);
    2507              : 
    2508              :         // Although the Broken state is not equivalent to shutdown() (shutdown will be called
    2509              :         // later when this tenant is detach or the process shuts down), firing the cancellation token
    2510              :         // here avoids the need for other tasks to watch for the Broken state explicitly.
    2511            1 :         self.cancel.cancel();
    2512            1 :     }
    2513              : 
    2514       113910 :     pub(crate) fn current_state(&self) -> TimelineState {
    2515       113910 :         self.state.borrow().clone()
    2516       113910 :     }
    2517              : 
    2518            3 :     pub(crate) fn is_broken(&self) -> bool {
    2519            3 :         matches!(&*self.state.borrow(), TimelineState::Broken { .. })
    2520            3 :     }
    2521              : 
    2522          126 :     pub(crate) fn is_active(&self) -> bool {
    2523          126 :         self.current_state() == TimelineState::Active
    2524          126 :     }
    2525              : 
    2526            8 :     pub(crate) fn is_archived(&self) -> Option<bool> {
    2527            8 :         self.remote_client.is_archived()
    2528            8 :     }
    2529              : 
    2530            8 :     pub(crate) fn is_invisible(&self) -> Option<bool> {
    2531            8 :         self.remote_client.is_invisible()
    2532            8 :     }
    2533              : 
    2534          568 :     pub(crate) fn is_stopping(&self) -> bool {
    2535          568 :         self.current_state() == TimelineState::Stopping
    2536          568 :     }
    2537              : 
    2538            0 :     pub(crate) fn subscribe_for_state_updates(&self) -> watch::Receiver<TimelineState> {
    2539            0 :         self.state.subscribe()
    2540            0 :     }
    2541              : 
    2542       112981 :     pub(crate) async fn wait_to_become_active(
    2543       112981 :         &self,
    2544       112981 :         _ctx: &RequestContext, // Prepare for use by cancellation
    2545       112981 :     ) -> Result<(), TimelineState> {
    2546       112981 :         let mut receiver = self.state.subscribe();
    2547              :         loop {
    2548       112981 :             let current_state = receiver.borrow().clone();
    2549       112981 :             match current_state {
    2550              :                 TimelineState::Loading => {
    2551            0 :                     receiver
    2552            0 :                         .changed()
    2553            0 :                         .await
    2554            0 :                         .expect("holding a reference to self");
    2555              :                 }
    2556              :                 TimelineState::Active => {
    2557       112980 :                     return Ok(());
    2558              :                 }
    2559              :                 TimelineState::Broken { .. } | TimelineState::Stopping => {
    2560              :                     // There's no chance the timeline can transition back into ::Active
    2561            1 :                     return Err(current_state);
    2562              :                 }
    2563              :             }
    2564              :         }
    2565       112981 :     }
    2566              : 
    2567            0 :     pub(crate) async fn layer_map_info(
    2568            0 :         &self,
    2569            0 :         reset: LayerAccessStatsReset,
    2570            0 :     ) -> Result<LayerMapInfo, layer_manager::Shutdown> {
    2571            0 :         let guard = self
    2572            0 :             .layers
    2573            0 :             .read(LayerManagerLockHolder::GetLayerMapInfo)
    2574            0 :             .await;
    2575            0 :         let layer_map = guard.layer_map()?;
    2576            0 :         let mut in_memory_layers = Vec::with_capacity(layer_map.frozen_layers.len() + 1);
    2577            0 :         if let Some(open_layer) = &layer_map.open_layer {
    2578            0 :             in_memory_layers.push(open_layer.info());
    2579            0 :         }
    2580            0 :         for frozen_layer in &layer_map.frozen_layers {
    2581            0 :             in_memory_layers.push(frozen_layer.info());
    2582            0 :         }
    2583              : 
    2584            0 :         let historic_layers = layer_map
    2585            0 :             .iter_historic_layers()
    2586            0 :             .map(|desc| guard.get_from_desc(&desc).info(reset))
    2587            0 :             .collect();
    2588              : 
    2589            0 :         Ok(LayerMapInfo {
    2590            0 :             in_memory_layers,
    2591            0 :             historic_layers,
    2592            0 :         })
    2593            0 :     }
    2594              : 
    2595              :     #[instrument(skip_all, fields(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))]
    2596              :     pub(crate) async fn download_layer(
    2597              :         &self,
    2598              :         layer_file_name: &LayerName,
    2599              :         ctx: &RequestContext,
    2600              :     ) -> Result<Option<bool>, super::storage_layer::layer::DownloadError> {
    2601              :         let Some(layer) = self
    2602              :             .find_layer(layer_file_name)
    2603              :             .await
    2604            0 :             .map_err(|e| match e {
    2605              :                 layer_manager::Shutdown => {
    2606            0 :                     super::storage_layer::layer::DownloadError::TimelineShutdown
    2607              :                 }
    2608            0 :             })?
    2609              :         else {
    2610              :             return Ok(None);
    2611              :         };
    2612              : 
    2613              :         layer.download(ctx).await?;
    2614              : 
    2615              :         Ok(Some(true))
    2616              :     }
    2617              : 
    2618              :     /// Evict just one layer.
    2619              :     ///
    2620              :     /// Returns `Ok(None)` in the case where the layer could not be found by its `layer_file_name`.
    2621            0 :     pub(crate) async fn evict_layer(
    2622            0 :         &self,
    2623            0 :         layer_file_name: &LayerName,
    2624            0 :     ) -> anyhow::Result<Option<bool>> {
    2625            0 :         let _gate = self
    2626            0 :             .gate
    2627            0 :             .enter()
    2628            0 :             .map_err(|_| anyhow::anyhow!("Shutting down"))?;
    2629              : 
    2630            0 :         let Some(local_layer) = self.find_layer(layer_file_name).await? else {
    2631            0 :             return Ok(None);
    2632              :         };
    2633              : 
    2634              :         // curl has this by default
    2635            0 :         let timeout = std::time::Duration::from_secs(120);
    2636              : 
    2637            0 :         match local_layer.evict_and_wait(timeout).await {
    2638            0 :             Ok(()) => Ok(Some(true)),
    2639            0 :             Err(EvictionError::NotFound) => Ok(Some(false)),
    2640            0 :             Err(EvictionError::Downloaded) => Ok(Some(false)),
    2641            0 :             Err(EvictionError::Timeout) => Ok(Some(false)),
    2642              :         }
    2643            0 :     }
    2644              : 
    2645      2401506 :     fn should_roll(
    2646      2401506 :         &self,
    2647      2401506 :         layer_size: u64,
    2648      2401506 :         projected_layer_size: u64,
    2649      2401506 :         checkpoint_distance: u64,
    2650      2401506 :         projected_lsn: Lsn,
    2651      2401506 :         last_freeze_at: Lsn,
    2652      2401506 :         opened_at: Instant,
    2653      2401506 :     ) -> bool {
    2654      2401506 :         let distance = projected_lsn.widening_sub(last_freeze_at);
    2655              : 
    2656              :         // Rolling the open layer can be triggered by:
    2657              :         // 1. The distance from the last LSN we rolled at. This bounds the amount of WAL that
    2658              :         //    the safekeepers need to store.  For sharded tenants, we multiply by shard count to
    2659              :         //    account for how writes are distributed across shards: we expect each node to consume
    2660              :         //    1/count of the LSN on average.
    2661              :         // 2. The size of the currently open layer.
    2662              :         // 3. The time since the last roll. It helps safekeepers to regard pageserver as caught
    2663              :         //    up and suspend activity.
    2664      2401506 :         if distance >= checkpoint_distance as i128 * self.shard_identity.count.count() as i128 {
    2665            0 :             info!(
    2666            0 :                 "Will roll layer at {} with layer size {} due to LSN distance ({})",
    2667              :                 projected_lsn, layer_size, distance
    2668              :             );
    2669              : 
    2670            0 :             true
    2671      2401506 :         } else if projected_layer_size >= checkpoint_distance {
    2672              :             // NB: this check is relied upon by:
    2673           40 :             let _ = IndexEntry::validate_checkpoint_distance;
    2674           40 :             info!(
    2675            0 :                 "Will roll layer at {} with layer size {} due to layer size ({})",
    2676              :                 projected_lsn, layer_size, projected_layer_size
    2677              :             );
    2678              : 
    2679           40 :             true
    2680      2401466 :         } else if distance > 0 && opened_at.elapsed() >= self.get_checkpoint_timeout() {
    2681            0 :             info!(
    2682            0 :                 "Will roll layer at {} with layer size {} due to time since first write to the layer ({:?})",
    2683              :                 projected_lsn,
    2684              :                 layer_size,
    2685            0 :                 opened_at.elapsed()
    2686              :             );
    2687              : 
    2688            0 :             true
    2689              :         } else {
    2690      2401466 :             false
    2691              :         }
    2692      2401506 :     }
    2693              : 
    2694            1 :     pub(crate) fn is_basebackup_cache_enabled(&self) -> bool {
    2695            1 :         let tenant_conf = self.tenant_conf.load();
    2696            1 :         tenant_conf
    2697            1 :             .tenant_conf
    2698            1 :             .basebackup_cache_enabled
    2699            1 :             .unwrap_or(self.conf.default_tenant_conf.basebackup_cache_enabled)
    2700            1 :     }
    2701              : 
    2702              :     /// Try to get a basebackup from the on-disk cache.
    2703            0 :     pub(crate) async fn get_cached_basebackup(&self, lsn: Lsn) -> Option<tokio::fs::File> {
    2704            0 :         self.basebackup_cache
    2705            0 :             .get(self.tenant_shard_id.tenant_id, self.timeline_id, lsn)
    2706            0 :             .await
    2707            0 :     }
    2708              : 
    2709              :     /// Convenience method to attempt fetching a basebackup for the timeline if enabled and safe for
    2710              :     /// the given request parameters.
    2711              :     ///
    2712              :     /// TODO: consider moving this onto GrpcPageServiceHandler once the libpq handler is gone.
    2713            0 :     pub async fn get_cached_basebackup_if_enabled(
    2714            0 :         &self,
    2715            0 :         lsn: Option<Lsn>,
    2716            0 :         prev_lsn: Option<Lsn>,
    2717            0 :         full: bool,
    2718            0 :         replica: bool,
    2719            0 :         gzip: bool,
    2720            0 :     ) -> Option<tokio::fs::File> {
    2721            0 :         if !self.is_basebackup_cache_enabled() || !self.basebackup_cache.is_enabled() {
    2722            0 :             return None;
    2723            0 :         }
    2724              :         // We have to know which LSN to fetch the basebackup for.
    2725            0 :         let lsn = lsn?;
    2726              :         // We only cache gzipped, non-full basebackups for primary computes with automatic prev_lsn.
    2727            0 :         if prev_lsn.is_some() || full || replica || !gzip {
    2728            0 :             return None;
    2729            0 :         }
    2730            0 :         self.get_cached_basebackup(lsn).await
    2731            0 :     }
    2732              : 
    2733              :     /// Prepare basebackup for the given LSN and store it in the basebackup cache.
    2734              :     /// The method is asynchronous and returns immediately.
    2735              :     /// The actual basebackup preparation is performed in the background
    2736              :     /// by the basebackup cache on a best-effort basis.
    2737            1 :     pub(crate) fn prepare_basebackup(&self, lsn: Lsn) {
    2738            1 :         if !self.is_basebackup_cache_enabled() {
    2739            1 :             return;
    2740            0 :         }
    2741            0 :         if !self.tenant_shard_id.is_shard_zero() {
    2742              :             // In theory we should never get here, but just in case check it.
    2743              :             // Preparing basebackup doesn't make sense for shards other than shard zero.
    2744            0 :             return;
    2745            0 :         }
    2746            0 :         if !self.is_active() {
    2747              :             // May happen during initial timeline creation.
    2748              :             // Such timeline is not in the global timeline map yet,
    2749              :             // so basebackup cache will not be able to find it.
    2750              :             // TODO(diko): We can prepare such timelines in finish_creation().
    2751            0 :             return;
    2752            0 :         }
    2753              : 
    2754            0 :         self.basebackup_cache
    2755            0 :             .send_prepare(self.tenant_shard_id, self.timeline_id, lsn);
    2756            1 :     }
    2757              : }
    2758              : 
    2759              : /// Number of times we will compute partition within a checkpoint distance.
    2760              : const REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE: u64 = 10;
    2761              : 
    2762              : // Private functions
    2763              : impl Timeline {
    2764            6 :     pub(crate) fn get_lsn_lease_length(&self) -> Duration {
    2765            6 :         let tenant_conf = self.tenant_conf.load();
    2766            6 :         tenant_conf
    2767            6 :             .tenant_conf
    2768            6 :             .lsn_lease_length
    2769            6 :             .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length)
    2770            6 :     }
    2771              : 
    2772            0 :     pub(crate) fn get_lsn_lease_length_for_ts(&self) -> Duration {
    2773            0 :         let tenant_conf = self.tenant_conf.load();
    2774            0 :         tenant_conf
    2775            0 :             .tenant_conf
    2776            0 :             .lsn_lease_length_for_ts
    2777            0 :             .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length_for_ts)
    2778            0 :     }
    2779              : 
    2780            0 :     pub(crate) fn is_gc_blocked_by_lsn_lease_deadline(&self) -> bool {
    2781            0 :         let tenant_conf = self.tenant_conf.load();
    2782            0 :         tenant_conf.is_gc_blocked_by_lsn_lease_deadline()
    2783            0 :     }
    2784              : 
    2785            0 :     pub(crate) fn get_lazy_slru_download(&self) -> bool {
    2786            0 :         let tenant_conf = self.tenant_conf.load();
    2787            0 :         tenant_conf
    2788            0 :             .tenant_conf
    2789            0 :             .lazy_slru_download
    2790            0 :             .unwrap_or(self.conf.default_tenant_conf.lazy_slru_download)
    2791            0 :     }
    2792              : 
    2793              :     /// Checks if a get page request should get perf tracing
    2794              :     ///
    2795              :     /// The configuration priority is: tenant config override, default tenant config,
    2796              :     /// pageserver config.
    2797            0 :     pub(crate) fn is_get_page_request_sampled(&self) -> bool {
    2798            0 :         let tenant_conf = self.tenant_conf.load();
    2799            0 :         let ratio = tenant_conf
    2800            0 :             .tenant_conf
    2801            0 :             .sampling_ratio
    2802            0 :             .flatten()
    2803            0 :             .or(self.conf.default_tenant_conf.sampling_ratio)
    2804            0 :             .or(self.conf.tracing.as_ref().map(|t| t.sampling_ratio));
    2805              : 
    2806            0 :         match ratio {
    2807            0 :             Some(r) => {
    2808            0 :                 if r.numerator == 0 {
    2809            0 :                     false
    2810              :                 } else {
    2811            0 :                     rand::thread_rng().gen_range(0..r.denominator) < r.numerator
    2812              :                 }
    2813              :             }
    2814            0 :             None => false,
    2815              :         }
    2816            0 :     }
    2817              : 
    2818      2402187 :     fn get_checkpoint_distance(&self) -> u64 {
    2819      2402187 :         let tenant_conf = self.tenant_conf.load();
    2820      2402187 :         tenant_conf
    2821      2402187 :             .tenant_conf
    2822      2402187 :             .checkpoint_distance
    2823      2402187 :             .unwrap_or(self.conf.default_tenant_conf.checkpoint_distance)
    2824      2402187 :     }
    2825              : 
    2826      2401522 :     fn get_checkpoint_timeout(&self) -> Duration {
    2827      2401522 :         let tenant_conf = self.tenant_conf.load();
    2828      2401522 :         tenant_conf
    2829      2401522 :             .tenant_conf
    2830      2401522 :             .checkpoint_timeout
    2831      2401522 :             .unwrap_or(self.conf.default_tenant_conf.checkpoint_timeout)
    2832      2401522 :     }
    2833              : 
    2834            1 :     pub(crate) fn get_pitr_interval(&self) -> Duration {
    2835            1 :         let tenant_conf = &self.tenant_conf.load().tenant_conf;
    2836            1 :         tenant_conf
    2837            1 :             .pitr_interval
    2838            1 :             .unwrap_or(self.conf.default_tenant_conf.pitr_interval)
    2839            1 :     }
    2840              : 
    2841         1273 :     fn get_compaction_period(&self) -> Duration {
    2842         1273 :         let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
    2843         1273 :         tenant_conf
    2844         1273 :             .compaction_period
    2845         1273 :             .unwrap_or(self.conf.default_tenant_conf.compaction_period)
    2846         1273 :     }
    2847              : 
    2848          352 :     fn get_compaction_target_size(&self) -> u64 {
    2849          352 :         let tenant_conf = self.tenant_conf.load();
    2850          352 :         tenant_conf
    2851          352 :             .tenant_conf
    2852          352 :             .compaction_target_size
    2853          352 :             .unwrap_or(self.conf.default_tenant_conf.compaction_target_size)
    2854          352 :     }
    2855              : 
    2856          820 :     fn get_compaction_threshold(&self) -> usize {
    2857          820 :         let tenant_conf = self.tenant_conf.load();
    2858          820 :         tenant_conf
    2859          820 :             .tenant_conf
    2860          820 :             .compaction_threshold
    2861          820 :             .unwrap_or(self.conf.default_tenant_conf.compaction_threshold)
    2862          820 :     }
    2863              : 
    2864              :     /// Returns `true` if the rel_size_v2 config is enabled. NOTE: the write path and read path
    2865              :     /// should look at `get_rel_size_v2_status()` to get the actual status of the timeline. It is
    2866              :     /// possible that the index part persists the state while the config doesn't get persisted.
    2867          973 :     pub(crate) fn get_rel_size_v2_enabled(&self) -> bool {
    2868          973 :         let tenant_conf = self.tenant_conf.load();
    2869          973 :         tenant_conf
    2870          973 :             .tenant_conf
    2871          973 :             .rel_size_v2_enabled
    2872          973 :             .unwrap_or(self.conf.default_tenant_conf.rel_size_v2_enabled)
    2873          973 :     }
    2874              : 
    2875         1099 :     pub(crate) fn get_rel_size_v2_status(&self) -> RelSizeMigration {
    2876         1099 :         self.rel_size_v2_status
    2877         1099 :             .load()
    2878         1099 :             .as_ref()
    2879         1099 :             .map(|s| s.as_ref().clone())
    2880         1099 :             .unwrap_or(RelSizeMigration::Legacy)
    2881         1099 :     }
    2882              : 
    2883           23 :     fn get_compaction_upper_limit(&self) -> usize {
    2884           23 :         let tenant_conf = self.tenant_conf.load();
    2885           23 :         tenant_conf
    2886           23 :             .tenant_conf
    2887           23 :             .compaction_upper_limit
    2888           23 :             .unwrap_or(self.conf.default_tenant_conf.compaction_upper_limit)
    2889           23 :     }
    2890              : 
    2891            0 :     pub fn get_compaction_l0_first(&self) -> bool {
    2892            0 :         let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
    2893            0 :         tenant_conf
    2894            0 :             .compaction_l0_first
    2895            0 :             .unwrap_or(self.conf.default_tenant_conf.compaction_l0_first)
    2896            0 :     }
    2897              : 
    2898            0 :     pub fn get_compaction_l0_semaphore(&self) -> bool {
    2899            0 :         let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
    2900            0 :         tenant_conf
    2901            0 :             .compaction_l0_semaphore
    2902            0 :             .unwrap_or(self.conf.default_tenant_conf.compaction_l0_semaphore)
    2903            0 :     }
    2904              : 
    2905          636 :     fn get_l0_flush_delay_threshold(&self) -> Option<usize> {
    2906              :         // By default, delay L0 flushes at 3x the compaction threshold. The compaction threshold
    2907              :         // defaults to 10, and L0 compaction is generally able to keep L0 counts below 30.
    2908              :         const DEFAULT_L0_FLUSH_DELAY_FACTOR: usize = 3;
    2909              : 
    2910              :         // If compaction is disabled, don't delay.
    2911          636 :         if self.get_compaction_period() == Duration::ZERO {
    2912          632 :             return None;
    2913            4 :         }
    2914              : 
    2915            4 :         let compaction_threshold = self.get_compaction_threshold();
    2916            4 :         let tenant_conf = self.tenant_conf.load();
    2917            4 :         let l0_flush_delay_threshold = tenant_conf
    2918            4 :             .tenant_conf
    2919            4 :             .l0_flush_delay_threshold
    2920            4 :             .or(self.conf.default_tenant_conf.l0_flush_delay_threshold)
    2921            4 :             .unwrap_or(DEFAULT_L0_FLUSH_DELAY_FACTOR * compaction_threshold);
    2922              : 
    2923              :         // 0 disables backpressure.
    2924            4 :         if l0_flush_delay_threshold == 0 {
    2925            0 :             return None;
    2926            4 :         }
    2927              : 
    2928              :         // Clamp the flush delay threshold to the compaction threshold; it doesn't make sense to
    2929              :         // backpressure flushes below this.
    2930              :         // TODO: the tenant config should have validation to prevent this instead.
    2931            4 :         debug_assert!(l0_flush_delay_threshold >= compaction_threshold);
    2932            4 :         Some(max(l0_flush_delay_threshold, compaction_threshold))
    2933          636 :     }
    2934              : 
    2935          637 :     fn get_l0_flush_stall_threshold(&self) -> Option<usize> {
    2936              :         // Disable L0 stalls by default. Stalling can cause unavailability if L0 compaction isn't
    2937              :         // responsive, and it can e.g. block on other compaction via the compaction semaphore or
    2938              :         // sibling timelines. We need more confidence before enabling this.
    2939              :         const DEFAULT_L0_FLUSH_STALL_FACTOR: usize = 0; // TODO: default to e.g. 5
    2940              : 
    2941              :         // If compaction is disabled, don't stall.
    2942          637 :         if self.get_compaction_period() == Duration::ZERO {
    2943          632 :             return None;
    2944            5 :         }
    2945              : 
    2946              :         // If compaction is failing, don't stall and try to keep the tenant alive. This may not be a
    2947              :         // good idea: read amp can grow unbounded, leading to terrible performance, and we may take
    2948              :         // on unbounded compaction debt that can take a long time to fix once compaction comes back
    2949              :         // online. At least we'll delay flushes, slowing down the growth and buying some time.
    2950            5 :         if self.compaction_failed.load(AtomicOrdering::Relaxed) {
    2951            0 :             return None;
    2952            5 :         }
    2953              : 
    2954            5 :         let compaction_threshold = self.get_compaction_threshold();
    2955            5 :         let tenant_conf = self.tenant_conf.load();
    2956            5 :         let l0_flush_stall_threshold = tenant_conf
    2957            5 :             .tenant_conf
    2958            5 :             .l0_flush_stall_threshold
    2959            5 :             .or(self.conf.default_tenant_conf.l0_flush_stall_threshold);
    2960              : 
    2961              :         // Tests sometimes set compaction_threshold=1 to generate lots of layer files, and don't
    2962              :         // handle the 20-second compaction delay. Some (e.g. `test_backward_compatibility`) can't
    2963              :         // easily adjust the L0 backpressure settings, so just disable stalls in this case.
    2964            5 :         if cfg!(feature = "testing")
    2965            5 :             && compaction_threshold == 1
    2966            0 :             && l0_flush_stall_threshold.is_none()
    2967              :         {
    2968            0 :             return None;
    2969            5 :         }
    2970              : 
    2971            5 :         let l0_flush_stall_threshold = l0_flush_stall_threshold
    2972            5 :             .unwrap_or(DEFAULT_L0_FLUSH_STALL_FACTOR * compaction_threshold);
    2973              : 
    2974              :         // 0 disables backpressure.
    2975            5 :         if l0_flush_stall_threshold == 0 {
    2976            5 :             return None;
    2977            0 :         }
    2978              : 
    2979              :         // Clamp the flush stall threshold to the compaction threshold; it doesn't make sense to
    2980              :         // backpressure flushes below this.
    2981              :         // TODO: the tenant config should have validation to prevent this instead.
    2982            0 :         debug_assert!(l0_flush_stall_threshold >= compaction_threshold);
    2983            0 :         Some(max(l0_flush_stall_threshold, compaction_threshold))
    2984          637 :     }
    2985              : 
    2986           57 :     fn get_image_creation_threshold(&self) -> usize {
    2987           57 :         let tenant_conf = self.tenant_conf.load();
    2988           57 :         tenant_conf
    2989           57 :             .tenant_conf
    2990           57 :             .image_creation_threshold
    2991           57 :             .unwrap_or(self.conf.default_tenant_conf.image_creation_threshold)
    2992           57 :     }
    2993              : 
    2994              :     // HADRON
    2995          193 :     fn get_image_layer_force_creation_period(&self) -> Option<Duration> {
    2996          193 :         let tenant_conf = self.tenant_conf.load();
    2997          193 :         tenant_conf
    2998          193 :             .tenant_conf
    2999          193 :             .image_layer_force_creation_period
    3000          193 :             .or(self
    3001          193 :                 .conf
    3002          193 :                 .default_tenant_conf
    3003          193 :                 .image_layer_force_creation_period)
    3004          193 :     }
    3005              : 
    3006          192 :     fn get_compaction_algorithm_settings(&self) -> CompactionAlgorithmSettings {
    3007          192 :         let tenant_conf = &self.tenant_conf.load();
    3008          192 :         tenant_conf
    3009          192 :             .tenant_conf
    3010          192 :             .compaction_algorithm
    3011          192 :             .as_ref()
    3012          192 :             .unwrap_or(&self.conf.default_tenant_conf.compaction_algorithm)
    3013          192 :             .clone()
    3014          192 :     }
    3015              : 
    3016          192 :     pub fn get_compaction_shard_ancestor(&self) -> bool {
    3017          192 :         let tenant_conf = self.tenant_conf.load();
    3018          192 :         tenant_conf
    3019          192 :             .tenant_conf
    3020          192 :             .compaction_shard_ancestor
    3021          192 :             .unwrap_or(self.conf.default_tenant_conf.compaction_shard_ancestor)
    3022          192 :     }
    3023              : 
    3024            0 :     fn get_eviction_policy(&self) -> EvictionPolicy {
    3025            0 :         let tenant_conf = self.tenant_conf.load();
    3026            0 :         tenant_conf
    3027            0 :             .tenant_conf
    3028            0 :             .eviction_policy
    3029            0 :             .unwrap_or(self.conf.default_tenant_conf.eviction_policy)
    3030            0 :     }
    3031              : 
    3032          235 :     fn get_evictions_low_residence_duration_metric_threshold(
    3033          235 :         tenant_conf: &pageserver_api::models::TenantConfig,
    3034          235 :         default_tenant_conf: &pageserver_api::config::TenantConfigToml,
    3035          235 :     ) -> Duration {
    3036          235 :         tenant_conf
    3037          235 :             .evictions_low_residence_duration_metric_threshold
    3038          235 :             .unwrap_or(default_tenant_conf.evictions_low_residence_duration_metric_threshold)
    3039          235 :     }
    3040              : 
    3041          191 :     fn get_image_layer_creation_check_threshold(&self) -> u8 {
    3042          191 :         let tenant_conf = self.tenant_conf.load();
    3043          191 :         tenant_conf
    3044          191 :             .tenant_conf
    3045          191 :             .image_layer_creation_check_threshold
    3046          191 :             .unwrap_or(
    3047          191 :                 self.conf
    3048          191 :                     .default_tenant_conf
    3049          191 :                     .image_layer_creation_check_threshold,
    3050              :             )
    3051          191 :     }
    3052              : 
    3053           27 :     fn get_gc_compaction_settings(&self) -> GcCompactionCombinedSettings {
    3054           27 :         let tenant_conf = &self.tenant_conf.load();
    3055           27 :         let gc_compaction_enabled = tenant_conf
    3056           27 :             .tenant_conf
    3057           27 :             .gc_compaction_enabled
    3058           27 :             .unwrap_or(self.conf.default_tenant_conf.gc_compaction_enabled);
    3059           27 :         let gc_compaction_verification = tenant_conf
    3060           27 :             .tenant_conf
    3061           27 :             .gc_compaction_verification
    3062           27 :             .unwrap_or(self.conf.default_tenant_conf.gc_compaction_verification);
    3063           27 :         let gc_compaction_initial_threshold_kb = tenant_conf
    3064           27 :             .tenant_conf
    3065           27 :             .gc_compaction_initial_threshold_kb
    3066           27 :             .unwrap_or(
    3067           27 :                 self.conf
    3068           27 :                     .default_tenant_conf
    3069           27 :                     .gc_compaction_initial_threshold_kb,
    3070              :             );
    3071           27 :         let gc_compaction_ratio_percent = tenant_conf
    3072           27 :             .tenant_conf
    3073           27 :             .gc_compaction_ratio_percent
    3074           27 :             .unwrap_or(self.conf.default_tenant_conf.gc_compaction_ratio_percent);
    3075           27 :         GcCompactionCombinedSettings {
    3076           27 :             gc_compaction_enabled,
    3077           27 :             gc_compaction_verification,
    3078           27 :             gc_compaction_initial_threshold_kb,
    3079           27 :             gc_compaction_ratio_percent,
    3080           27 :         }
    3081           27 :     }
    3082              : 
    3083            0 :     fn get_image_creation_preempt_threshold(&self) -> usize {
    3084            0 :         let tenant_conf = self.tenant_conf.load();
    3085            0 :         tenant_conf
    3086            0 :             .tenant_conf
    3087            0 :             .image_creation_preempt_threshold
    3088            0 :             .unwrap_or(
    3089            0 :                 self.conf
    3090            0 :                     .default_tenant_conf
    3091            0 :                     .image_creation_preempt_threshold,
    3092              :             )
    3093            0 :     }
    3094              : 
    3095            0 :     pub(super) fn tenant_conf_updated(&self, new_conf: &AttachedTenantConf) {
    3096              :         // NB: Most tenant conf options are read by background loops, so,
    3097              :         // changes will automatically be picked up.
    3098              : 
    3099              :         // The threshold is embedded in the metric. So, we need to update it.
    3100              :         {
    3101            0 :             let new_threshold = Self::get_evictions_low_residence_duration_metric_threshold(
    3102            0 :                 &new_conf.tenant_conf,
    3103            0 :                 &self.conf.default_tenant_conf,
    3104              :             );
    3105              : 
    3106            0 :             let tenant_id_str = self.tenant_shard_id.tenant_id.to_string();
    3107            0 :             let shard_id_str = format!("{}", self.tenant_shard_id.shard_slug());
    3108              : 
    3109            0 :             let timeline_id_str = self.timeline_id.to_string();
    3110              : 
    3111            0 :             self.remote_client.update_config(&new_conf.location);
    3112              : 
    3113            0 :             let mut rel_size_cache = self.rel_size_snapshot_cache.lock().unwrap();
    3114            0 :             if let Some(new_capacity) = new_conf.tenant_conf.relsize_snapshot_cache_capacity {
    3115            0 :                 if new_capacity != rel_size_cache.capacity() {
    3116            0 :                     rel_size_cache.set_capacity(new_capacity);
    3117            0 :                 }
    3118            0 :             }
    3119              : 
    3120            0 :             self.metrics
    3121            0 :                 .evictions_with_low_residence_duration
    3122            0 :                 .write()
    3123            0 :                 .unwrap()
    3124            0 :                 .change_threshold(
    3125            0 :                     &tenant_id_str,
    3126            0 :                     &shard_id_str,
    3127            0 :                     &timeline_id_str,
    3128            0 :                     new_threshold,
    3129            0 :                 );
    3130              :         }
    3131            0 :     }
    3132              : 
    3133              :     /// Open a Timeline handle.
    3134              :     ///
    3135              :     /// Loads the metadata for the timeline into memory, but not the layer map.
    3136              :     #[allow(clippy::too_many_arguments)]
    3137          235 :     pub(super) fn new(
    3138          235 :         conf: &'static PageServerConf,
    3139          235 :         tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
    3140          235 :         metadata: &TimelineMetadata,
    3141          235 :         previous_heatmap: Option<PreviousHeatmap>,
    3142          235 :         ancestor: Option<Arc<Timeline>>,
    3143          235 :         timeline_id: TimelineId,
    3144          235 :         tenant_shard_id: TenantShardId,
    3145          235 :         generation: Generation,
    3146          235 :         shard_identity: ShardIdentity,
    3147          235 :         walredo_mgr: Option<Arc<super::WalRedoManager>>,
    3148          235 :         resources: TimelineResources,
    3149          235 :         pg_version: PgMajorVersion,
    3150          235 :         state: TimelineState,
    3151          235 :         attach_wal_lag_cooldown: Arc<OnceLock<WalLagCooldown>>,
    3152          235 :         create_idempotency: crate::tenant::CreateTimelineIdempotency,
    3153          235 :         gc_compaction_state: Option<GcCompactionState>,
    3154          235 :         rel_size_v2_status: Option<RelSizeMigration>,
    3155          235 :         cancel: CancellationToken,
    3156          235 :     ) -> Arc<Self> {
    3157          235 :         let disk_consistent_lsn = metadata.disk_consistent_lsn();
    3158          235 :         let (state, _) = watch::channel(state);
    3159              : 
    3160          235 :         let (layer_flush_start_tx, _) = tokio::sync::watch::channel((0, disk_consistent_lsn));
    3161          235 :         let (layer_flush_done_tx, _) = tokio::sync::watch::channel((0, Ok(())));
    3162              : 
    3163          235 :         let evictions_low_residence_duration_metric_threshold = {
    3164          235 :             let loaded_tenant_conf = tenant_conf.load();
    3165          235 :             Self::get_evictions_low_residence_duration_metric_threshold(
    3166          235 :                 &loaded_tenant_conf.tenant_conf,
    3167          235 :                 &conf.default_tenant_conf,
    3168              :             )
    3169              :         };
    3170              : 
    3171          235 :         if let Some(ancestor) = &ancestor {
    3172          118 :             let mut ancestor_gc_info = ancestor.gc_info.write().unwrap();
    3173          118 :             // If we construct an explicit timeline object, it's obviously not offloaded
    3174          118 :             let is_offloaded = MaybeOffloaded::No;
    3175          118 :             ancestor_gc_info.insert_child(timeline_id, metadata.ancestor_lsn(), is_offloaded);
    3176          118 :         }
    3177              : 
    3178          235 :         let relsize_snapshot_cache_capacity = {
    3179          235 :             let loaded_tenant_conf = tenant_conf.load();
    3180          235 :             loaded_tenant_conf
    3181          235 :                 .tenant_conf
    3182          235 :                 .relsize_snapshot_cache_capacity
    3183          235 :                 .unwrap_or(conf.default_tenant_conf.relsize_snapshot_cache_capacity)
    3184              :         };
    3185              : 
    3186          235 :         Arc::new_cyclic(|myself| {
    3187          235 :             let metrics = Arc::new(TimelineMetrics::new(
    3188          235 :                 &tenant_shard_id,
    3189          235 :                 &timeline_id,
    3190          235 :                 crate::metrics::EvictionsWithLowResidenceDurationBuilder::new(
    3191              :                     "mtime",
    3192          235 :                     evictions_low_residence_duration_metric_threshold,
    3193              :                 ),
    3194              :             ));
    3195          235 :             let aux_file_metrics = metrics.aux_file_size_gauge.clone();
    3196              : 
    3197          235 :             let mut result = Timeline {
    3198          235 :                 conf,
    3199          235 :                 tenant_conf,
    3200          235 :                 myself: myself.clone(),
    3201          235 :                 timeline_id,
    3202          235 :                 tenant_shard_id,
    3203          235 :                 generation,
    3204          235 :                 shard_identity,
    3205          235 :                 pg_version,
    3206          235 :                 layers: Default::default(),
    3207          235 :                 gc_compaction_layer_update_lock: tokio::sync::RwLock::new(()),
    3208              : 
    3209          235 :                 walredo_mgr,
    3210          235 :                 walreceiver: Mutex::new(None),
    3211              : 
    3212          235 :                 remote_client: Arc::new(resources.remote_client),
    3213              : 
    3214              :                 // initialize in-memory 'last_record_lsn' from 'disk_consistent_lsn'.
    3215          235 :                 last_record_lsn: SeqWait::new(RecordLsn {
    3216          235 :                     last: disk_consistent_lsn,
    3217          235 :                     prev: metadata.prev_record_lsn().unwrap_or(Lsn(0)),
    3218          235 :                 }),
    3219          235 :                 disk_consistent_lsn: AtomicLsn::new(disk_consistent_lsn.0),
    3220              : 
    3221          235 :                 gc_compaction_state: ArcSwap::new(Arc::new(gc_compaction_state)),
    3222              : 
    3223          235 :                 last_freeze_at: AtomicLsn::new(disk_consistent_lsn.0),
    3224          235 :                 last_freeze_ts: RwLock::new(Instant::now()),
    3225              : 
    3226          235 :                 loaded_at: (disk_consistent_lsn, SystemTime::now()),
    3227              : 
    3228          235 :                 ancestor_timeline: ancestor,
    3229          235 :                 ancestor_lsn: metadata.ancestor_lsn(),
    3230              : 
    3231          235 :                 metrics,
    3232              : 
    3233          235 :                 query_metrics: crate::metrics::SmgrQueryTimePerTimeline::new(
    3234          235 :                     &tenant_shard_id,
    3235          235 :                     &timeline_id,
    3236          235 :                     resources.pagestream_throttle_metrics,
    3237              :                 ),
    3238              : 
    3239         1880 :                 directory_metrics: array::from_fn(|_| AtomicU64::new(0)),
    3240         1880 :                 directory_metrics_inited: array::from_fn(|_| AtomicBool::new(false)),
    3241              : 
    3242          235 :                 flush_loop_state: Mutex::new(FlushLoopState::NotStarted),
    3243              : 
    3244          235 :                 layer_flush_start_tx,
    3245          235 :                 layer_flush_done_tx,
    3246              : 
    3247          235 :                 write_lock: tokio::sync::Mutex::new(None),
    3248              : 
    3249          235 :                 gc_info: std::sync::RwLock::new(GcInfo::default()),
    3250              : 
    3251          235 :                 last_image_layer_creation_status: ArcSwap::new(Arc::new(
    3252          235 :                     LastImageLayerCreationStatus::default(),
    3253              :                 )),
    3254              : 
    3255          235 :                 applied_gc_cutoff_lsn: Rcu::new(metadata.latest_gc_cutoff_lsn()),
    3256          235 :                 initdb_lsn: metadata.initdb_lsn(),
    3257              : 
    3258          235 :                 current_logical_size: if disk_consistent_lsn.is_valid() {
    3259              :                     // we're creating timeline data with some layer files existing locally,
    3260              :                     // need to recalculate timeline's logical size based on data in the layers.
    3261          120 :                     LogicalSize::deferred_initial(disk_consistent_lsn)
    3262              :                 } else {
    3263              :                     // we're creating timeline data without any layers existing locally,
    3264              :                     // initial logical size is 0.
    3265          115 :                     LogicalSize::empty_initial()
    3266              :                 },
    3267              : 
    3268          235 :                 partitioning: GuardArcSwap::new((
    3269          235 :                     (KeyPartitioning::new(), KeyPartitioning::new().into_sparse()),
    3270          235 :                     Lsn(0),
    3271          235 :                 )),
    3272              :                 repartition_threshold: 0,
    3273          235 :                 last_image_layer_creation_check_at: AtomicLsn::new(0),
    3274          235 :                 last_image_layer_creation_check_instant: Mutex::new(None),
    3275          235 :                 last_received_wal: Mutex::new(None),
    3276          235 :                 rel_size_latest_cache: RwLock::new(HashMap::new()),
    3277          235 :                 rel_size_snapshot_cache: Mutex::new(LruCache::new(relsize_snapshot_cache_capacity)),
    3278              : 
    3279          235 :                 download_all_remote_layers_task_info: RwLock::new(None),
    3280              : 
    3281          235 :                 state,
    3282              : 
    3283          235 :                 eviction_task_timeline_state: tokio::sync::Mutex::new(
    3284          235 :                     EvictionTaskTimelineState::default(),
    3285              :                 ),
    3286          235 :                 delete_progress: TimelineDeleteProgress::default(),
    3287              : 
    3288          235 :                 cancel,
    3289          235 :                 gate: Gate::default(),
    3290              : 
    3291          235 :                 compaction_lock: tokio::sync::Mutex::default(),
    3292          235 :                 compaction_failed: AtomicBool::default(),
    3293          235 :                 l0_compaction_trigger: resources.l0_compaction_trigger,
    3294          235 :                 gc_lock: tokio::sync::Mutex::default(),
    3295              : 
    3296          235 :                 standby_horizon: AtomicLsn::new(0),
    3297              : 
    3298          235 :                 pagestream_throttle: resources.pagestream_throttle,
    3299              : 
    3300          235 :                 aux_file_size_estimator: AuxFileSizeEstimator::new(aux_file_metrics),
    3301              : 
    3302              :                 #[cfg(test)]
    3303          235 :                 extra_test_dense_keyspace: ArcSwap::new(Arc::new(KeySpace::default())),
    3304              : 
    3305          235 :                 l0_flush_global_state: resources.l0_flush_global_state,
    3306              : 
    3307          235 :                 handles: Default::default(),
    3308              : 
    3309          235 :                 attach_wal_lag_cooldown,
    3310              : 
    3311          235 :                 create_idempotency,
    3312              : 
    3313          235 :                 page_trace: Default::default(),
    3314              : 
    3315          235 :                 previous_heatmap: ArcSwapOption::from_pointee(previous_heatmap),
    3316              : 
    3317          235 :                 heatmap_layers_downloader: Mutex::new(None),
    3318              : 
    3319          235 :                 rel_size_v2_status: ArcSwapOption::from_pointee(rel_size_v2_status),
    3320              : 
    3321          235 :                 wait_lsn_log_slow: tokio::sync::Semaphore::new(1),
    3322              : 
    3323          235 :                 basebackup_cache: resources.basebackup_cache,
    3324              : 
    3325          235 :                 feature_resolver: resources.feature_resolver.clone(),
    3326              :             };
    3327              : 
    3328          235 :             result.repartition_threshold =
    3329          235 :                 result.get_checkpoint_distance() / REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE;
    3330              : 
    3331          235 :             result
    3332          235 :                 .metrics
    3333          235 :                 .last_record_lsn_gauge
    3334          235 :                 .set(disk_consistent_lsn.0 as i64);
    3335          235 :             result
    3336          235 :         })
    3337          235 :     }
    3338              : 
    3339          343 :     pub(super) fn maybe_spawn_flush_loop(self: &Arc<Self>) {
    3340          343 :         let Ok(guard) = self.gate.enter() else {
    3341            0 :             info!("cannot start flush loop when the timeline gate has already been closed");
    3342            0 :             return;
    3343              :         };
    3344          343 :         let mut flush_loop_state = self.flush_loop_state.lock().unwrap();
    3345          343 :         match *flush_loop_state {
    3346          232 :             FlushLoopState::NotStarted => (),
    3347              :             FlushLoopState::Running { .. } => {
    3348          111 :                 info!(
    3349            0 :                     "skipping attempt to start flush_loop twice {}/{}",
    3350            0 :                     self.tenant_shard_id, self.timeline_id
    3351              :                 );
    3352          111 :                 return;
    3353              :             }
    3354              :             FlushLoopState::Exited => {
    3355            0 :                 info!(
    3356            0 :                     "ignoring attempt to restart exited flush_loop {}/{}",
    3357            0 :                     self.tenant_shard_id, self.timeline_id
    3358              :                 );
    3359            0 :                 return;
    3360              :             }
    3361              :         }
    3362              : 
    3363          232 :         let layer_flush_start_rx = self.layer_flush_start_tx.subscribe();
    3364          232 :         let self_clone = Arc::clone(self);
    3365              : 
    3366          232 :         debug!("spawning flush loop");
    3367          232 :         *flush_loop_state = FlushLoopState::Running {
    3368          232 :             #[cfg(test)]
    3369          232 :             expect_initdb_optimization: false,
    3370          232 :             #[cfg(test)]
    3371          232 :             initdb_optimization_count: 0,
    3372          232 :         };
    3373          232 :         task_mgr::spawn(
    3374          232 :             task_mgr::BACKGROUND_RUNTIME.handle(),
    3375          232 :             task_mgr::TaskKind::LayerFlushTask,
    3376          232 :             self.tenant_shard_id,
    3377          232 :             Some(self.timeline_id),
    3378          232 :             "layer flush task",
    3379          232 :             async move {
    3380          232 :                 let _guard = guard;
    3381          232 :                 let background_ctx = RequestContext::todo_child(TaskKind::LayerFlushTask, DownloadBehavior::Error).with_scope_timeline(&self_clone);
    3382          232 :                 self_clone.flush_loop(layer_flush_start_rx, &background_ctx).await;
    3383            5 :                 let mut flush_loop_state = self_clone.flush_loop_state.lock().unwrap();
    3384            5 :                 assert!(matches!(*flush_loop_state, FlushLoopState::Running{..}));
    3385            5 :                 *flush_loop_state  = FlushLoopState::Exited;
    3386            5 :                 Ok(())
    3387            5 :             }
    3388          232 :             .instrument(info_span!(parent: None, "layer flush task", tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))
    3389              :         );
    3390          343 :     }
    3391              : 
    3392            0 :     pub(crate) fn update_gc_compaction_state(
    3393            0 :         &self,
    3394            0 :         gc_compaction_state: GcCompactionState,
    3395            0 :     ) -> anyhow::Result<()> {
    3396            0 :         self.gc_compaction_state
    3397            0 :             .store(Arc::new(Some(gc_compaction_state.clone())));
    3398            0 :         self.remote_client
    3399            0 :             .schedule_index_upload_for_gc_compaction_state_update(gc_compaction_state)
    3400            0 :     }
    3401              : 
    3402            0 :     pub(crate) fn update_rel_size_v2_status(
    3403            0 :         &self,
    3404            0 :         rel_size_v2_status: RelSizeMigration,
    3405            0 :     ) -> anyhow::Result<()> {
    3406            0 :         self.rel_size_v2_status
    3407            0 :             .store(Some(Arc::new(rel_size_v2_status.clone())));
    3408            0 :         self.remote_client
    3409            0 :             .schedule_index_upload_for_rel_size_v2_status_update(rel_size_v2_status)
    3410            0 :     }
    3411              : 
    3412            0 :     pub(crate) fn get_gc_compaction_state(&self) -> Option<GcCompactionState> {
    3413            0 :         self.gc_compaction_state.load_full().as_ref().clone()
    3414            0 :     }
    3415              : 
    3416              :     /// Creates and starts the wal receiver.
    3417              :     ///
    3418              :     /// This function is expected to be called at most once per Timeline's lifecycle
    3419              :     /// when the timeline is activated.
    3420            0 :     fn launch_wal_receiver(
    3421            0 :         self: &Arc<Self>,
    3422            0 :         ctx: &RequestContext,
    3423            0 :         broker_client: BrokerClientChannel,
    3424            0 :     ) {
    3425            0 :         info!(
    3426            0 :             "launching WAL receiver for timeline {} of tenant {}",
    3427            0 :             self.timeline_id, self.tenant_shard_id
    3428              :         );
    3429              : 
    3430            0 :         let tenant_conf = self.tenant_conf.load();
    3431            0 :         let wal_connect_timeout = tenant_conf
    3432            0 :             .tenant_conf
    3433            0 :             .walreceiver_connect_timeout
    3434            0 :             .unwrap_or(self.conf.default_tenant_conf.walreceiver_connect_timeout);
    3435            0 :         let lagging_wal_timeout = tenant_conf
    3436            0 :             .tenant_conf
    3437            0 :             .lagging_wal_timeout
    3438            0 :             .unwrap_or(self.conf.default_tenant_conf.lagging_wal_timeout);
    3439            0 :         let max_lsn_wal_lag = tenant_conf
    3440            0 :             .tenant_conf
    3441            0 :             .max_lsn_wal_lag
    3442            0 :             .unwrap_or(self.conf.default_tenant_conf.max_lsn_wal_lag);
    3443              : 
    3444            0 :         let mut guard = self.walreceiver.lock().unwrap();
    3445            0 :         assert!(
    3446            0 :             guard.is_none(),
    3447            0 :             "multiple launches / re-launches of WAL receiver are not supported"
    3448              :         );
    3449              : 
    3450            0 :         let protocol = PostgresClientProtocol::Interpreted {
    3451            0 :             format: utils::postgres_client::InterpretedFormat::Protobuf,
    3452            0 :             compression: Some(utils::postgres_client::Compression::Zstd { level: 1 }),
    3453            0 :         };
    3454              : 
    3455            0 :         *guard = Some(WalReceiver::start(
    3456            0 :             Arc::clone(self),
    3457            0 :             WalReceiverConf {
    3458            0 :                 protocol,
    3459            0 :                 wal_connect_timeout,
    3460            0 :                 lagging_wal_timeout,
    3461            0 :                 max_lsn_wal_lag,
    3462            0 :                 auth_token: crate::config::SAFEKEEPER_AUTH_TOKEN.get().cloned(),
    3463            0 :                 availability_zone: self.conf.availability_zone.clone(),
    3464            0 :                 ingest_batch_size: self.conf.ingest_batch_size,
    3465            0 :                 validate_wal_contiguity: self.conf.validate_wal_contiguity,
    3466            0 :             },
    3467            0 :             broker_client,
    3468            0 :             ctx,
    3469            0 :         ));
    3470            0 :     }
    3471              : 
    3472              :     /// Initialize with an empty layer map. Used when creating a new timeline.
    3473          232 :     pub(super) fn init_empty_layer_map(&self, start_lsn: Lsn) {
    3474          232 :         let mut layers = self.layers.try_write(LayerManagerLockHolder::Init).expect(
    3475          232 :             "in the context where we call this function, no other task has access to the object",
    3476              :         );
    3477          232 :         layers
    3478          232 :             .open_mut()
    3479          232 :             .expect("in this context the LayerManager must still be open")
    3480          232 :             .initialize_empty(Lsn(start_lsn.0));
    3481          232 :     }
    3482              : 
    3483              :     /// Scan the timeline directory, cleanup, populate the layer map, and schedule uploads for local-only
    3484              :     /// files.
    3485            3 :     pub(super) async fn load_layer_map(
    3486            3 :         &self,
    3487            3 :         disk_consistent_lsn: Lsn,
    3488            3 :         index_part: IndexPart,
    3489            3 :     ) -> anyhow::Result<()> {
    3490              :         use LayerName::*;
    3491              :         use init::Decision::*;
    3492              :         use init::{Discovered, DismissedLayer};
    3493              : 
    3494            3 :         let mut guard = self
    3495            3 :             .layers
    3496            3 :             .write(LayerManagerLockHolder::LoadLayerMap)
    3497            3 :             .await;
    3498              : 
    3499            3 :         let timer = self.metrics.load_layer_map_histo.start_timer();
    3500              : 
    3501              :         // Scan timeline directory and create ImageLayerName and DeltaFilename
    3502              :         // structs representing all files on disk
    3503            3 :         let timeline_path = self
    3504            3 :             .conf
    3505            3 :             .timeline_path(&self.tenant_shard_id, &self.timeline_id);
    3506            3 :         let conf = self.conf;
    3507            3 :         let span = tracing::Span::current();
    3508              : 
    3509              :         // Copy to move into the task we're about to spawn
    3510            3 :         let this = self.myself.upgrade().expect("&self method holds the arc");
    3511              : 
    3512            3 :         let (loaded_layers, needs_cleanup, total_physical_size) = tokio::task::spawn_blocking({
    3513            3 :             move || {
    3514            3 :                 let _g = span.entered();
    3515            3 :                 let discovered = init::scan_timeline_dir(&timeline_path)?;
    3516            3 :                 let mut discovered_layers = Vec::with_capacity(discovered.len());
    3517            3 :                 let mut unrecognized_files = Vec::new();
    3518              : 
    3519            3 :                 let mut path = timeline_path;
    3520              : 
    3521           11 :                 for discovered in discovered {
    3522            8 :                     let (name, kind) = match discovered {
    3523            8 :                         Discovered::Layer(layer_file_name, local_metadata) => {
    3524            8 :                             discovered_layers.push((layer_file_name, local_metadata));
    3525            8 :                             continue;
    3526              :                         }
    3527            0 :                         Discovered::IgnoredBackup(path) => {
    3528            0 :                             std::fs::remove_file(path)
    3529            0 :                                 .or_else(fs_ext::ignore_not_found)
    3530            0 :                                 .fatal_err("Removing .old file");
    3531            0 :                             continue;
    3532              :                         }
    3533            0 :                         Discovered::Unknown(file_name) => {
    3534              :                             // we will later error if there are any
    3535            0 :                             unrecognized_files.push(file_name);
    3536            0 :                             continue;
    3537              :                         }
    3538            0 :                         Discovered::Ephemeral(name) => (name, "old ephemeral file"),
    3539            0 :                         Discovered::Temporary(name) => (name, "temporary timeline file"),
    3540            0 :                         Discovered::TemporaryDownload(name) => (name, "temporary download"),
    3541              :                     };
    3542            0 :                     path.push(Utf8Path::new(&name));
    3543            0 :                     init::cleanup(&path, kind)?;
    3544            0 :                     path.pop();
    3545              :                 }
    3546              : 
    3547            3 :                 if !unrecognized_files.is_empty() {
    3548              :                     // assume that if there are any there are many many.
    3549            0 :                     let n = unrecognized_files.len();
    3550            0 :                     let first = &unrecognized_files[..n.min(10)];
    3551            0 :                     anyhow::bail!(
    3552            0 :                         "unrecognized files in timeline dir (total {n}), first 10: {first:?}"
    3553              :                     );
    3554            3 :                 }
    3555              : 
    3556            3 :                 let decided = init::reconcile(discovered_layers, &index_part, disk_consistent_lsn);
    3557              : 
    3558            3 :                 let mut loaded_layers = Vec::new();
    3559            3 :                 let mut needs_cleanup = Vec::new();
    3560            3 :                 let mut total_physical_size = 0;
    3561              : 
    3562           11 :                 for (name, decision) in decided {
    3563            8 :                     let decision = match decision {
    3564            8 :                         Ok(decision) => decision,
    3565            0 :                         Err(DismissedLayer::Future { local }) => {
    3566            0 :                             if let Some(local) = local {
    3567            0 :                                 init::cleanup_future_layer(
    3568            0 :                                     &local.local_path,
    3569            0 :                                     &name,
    3570            0 :                                     disk_consistent_lsn,
    3571            0 :                                 )?;
    3572            0 :                             }
    3573            0 :                             needs_cleanup.push(name);
    3574            0 :                             continue;
    3575              :                         }
    3576            0 :                         Err(DismissedLayer::LocalOnly(local)) => {
    3577            0 :                             init::cleanup_local_only_file(&name, &local)?;
    3578              :                             // this file never existed remotely, we will have to do rework
    3579            0 :                             continue;
    3580              :                         }
    3581            0 :                         Err(DismissedLayer::BadMetadata(local)) => {
    3582            0 :                             init::cleanup_local_file_for_remote(&local)?;
    3583              :                             // this file never existed remotely, we will have to do rework
    3584            0 :                             continue;
    3585              :                         }
    3586              :                     };
    3587              : 
    3588            8 :                     match &name {
    3589            6 :                         Delta(d) => assert!(d.lsn_range.end <= disk_consistent_lsn + 1),
    3590            2 :                         Image(i) => assert!(i.lsn <= disk_consistent_lsn),
    3591              :                     }
    3592              : 
    3593            8 :                     tracing::debug!(layer=%name, ?decision, "applied");
    3594              : 
    3595            8 :                     let layer = match decision {
    3596            8 :                         Resident { local, remote } => {
    3597            8 :                             total_physical_size += local.file_size;
    3598            8 :                             Layer::for_resident(conf, &this, local.local_path, name, remote)
    3599            8 :                                 .drop_eviction_guard()
    3600              :                         }
    3601            0 :                         Evicted(remote) => Layer::for_evicted(conf, &this, name, remote),
    3602              :                     };
    3603              : 
    3604            8 :                     loaded_layers.push(layer);
    3605              :                 }
    3606            3 :                 Ok((loaded_layers, needs_cleanup, total_physical_size))
    3607            3 :             }
    3608              :         })
    3609            3 :         .await
    3610            3 :         .map_err(anyhow::Error::new)
    3611            3 :         .and_then(|x| x)?;
    3612              : 
    3613            3 :         let num_layers = loaded_layers.len();
    3614              : 
    3615            3 :         guard
    3616            3 :             .open_mut()
    3617            3 :             .expect("layermanager must be open during init")
    3618            3 :             .initialize_local_layers(loaded_layers, disk_consistent_lsn + 1);
    3619              : 
    3620            3 :         self.remote_client
    3621            3 :             .schedule_layer_file_deletion(&needs_cleanup)?;
    3622            3 :         self.remote_client
    3623            3 :             .schedule_index_upload_for_file_changes()?;
    3624              :         // This barrier orders above DELETEs before any later operations.
    3625              :         // This is critical because code executing after the barrier might
    3626              :         // create again objects with the same key that we just scheduled for deletion.
    3627              :         // For example, if we just scheduled deletion of an image layer "from the future",
    3628              :         // later compaction might run again and re-create the same image layer.
    3629              :         // "from the future" here means an image layer whose LSN is > IndexPart::disk_consistent_lsn.
    3630              :         // "same" here means same key range and LSN.
    3631              :         //
    3632              :         // Without a barrier between above DELETEs and the re-creation's PUTs,
    3633              :         // the upload queue may execute the PUT first, then the DELETE.
    3634              :         // In our example, we will end up with an IndexPart referencing a non-existent object.
    3635              :         //
    3636              :         // 1. a future image layer is created and uploaded
    3637              :         // 2. ps restart
    3638              :         // 3. the future layer from (1) is deleted during load layer map
    3639              :         // 4. image layer is re-created and uploaded
    3640              :         // 5. deletion queue would like to delete (1) but actually deletes (4)
    3641              :         // 6. delete by name works as expected, but it now deletes the wrong (later) version
    3642              :         //
    3643              :         // See https://github.com/neondatabase/neon/issues/5878
    3644              :         //
    3645              :         // NB: generation numbers naturally protect against this because they disambiguate
    3646              :         //     (1) and (4)
    3647              :         // TODO: this is basically a no-op now, should we remove it?
    3648            3 :         self.remote_client.schedule_barrier()?;
    3649              :         // TenantShard::create_timeline will wait for these uploads to happen before returning, or
    3650              :         // on retry.
    3651              : 
    3652            3 :         info!(
    3653            0 :             "loaded layer map with {} layers at {}, total physical size: {}",
    3654              :             num_layers, disk_consistent_lsn, total_physical_size
    3655              :         );
    3656              : 
    3657            3 :         timer.stop_and_record();
    3658            3 :         Ok(())
    3659            3 :     }
    3660              : 
    3661              :     /// Retrieve current logical size of the timeline.
    3662              :     ///
    3663              :     /// The size could be lagging behind the actual number, in case
    3664              :     /// the initial size calculation has not been run (gets triggered on the first size access).
    3665              :     ///
    3666              :     /// return size and boolean flag that shows if the size is exact
    3667            0 :     pub(crate) fn get_current_logical_size(
    3668            0 :         self: &Arc<Self>,
    3669            0 :         priority: GetLogicalSizePriority,
    3670            0 :         ctx: &RequestContext,
    3671            0 :     ) -> logical_size::CurrentLogicalSize {
    3672            0 :         if !self.tenant_shard_id.is_shard_zero() {
    3673              :             // Logical size is only accurately maintained on shard zero: when called elsewhere, for example
    3674              :             // when HTTP API is serving a GET for timeline zero, return zero
    3675            0 :             return logical_size::CurrentLogicalSize::Approximate(logical_size::Approximate::zero());
    3676            0 :         }
    3677              : 
    3678            0 :         let current_size = self.current_logical_size.current_size();
    3679            0 :         debug!("Current size: {current_size:?}");
    3680              : 
    3681            0 :         match (current_size.accuracy(), priority) {
    3682            0 :             (logical_size::Accuracy::Exact, _) => (), // nothing to do
    3683            0 :             (logical_size::Accuracy::Approximate, GetLogicalSizePriority::Background) => {
    3684            0 :                 // background task will eventually deliver an exact value, we're in no rush
    3685            0 :             }
    3686              :             (logical_size::Accuracy::Approximate, GetLogicalSizePriority::User) => {
    3687              :                 // background task is not ready, but user is asking for it now;
    3688              :                 // => make the background task skip the line
    3689              :                 // (The alternative would be to calculate the size here, but,
    3690              :                 //  it can actually take a long time if the user has a lot of rels.
    3691              :                 //  And we'll inevitable need it again; So, let the background task do the work.)
    3692            0 :                 match self
    3693            0 :                     .current_logical_size
    3694            0 :                     .cancel_wait_for_background_loop_concurrency_limit_semaphore
    3695            0 :                     .get()
    3696              :                 {
    3697            0 :                     Some(cancel) => cancel.cancel(),
    3698              :                     None => {
    3699            0 :                         match self.current_state() {
    3700            0 :                             TimelineState::Broken { .. } | TimelineState::Stopping => {
    3701            0 :                                 // Can happen when timeline detail endpoint is used when deletion is ongoing (or its broken).
    3702            0 :                                 // Don't make noise.
    3703            0 :                             }
    3704              :                             TimelineState::Loading => {
    3705              :                                 // Import does not return an activated timeline.
    3706            0 :                                 info!(
    3707            0 :                                     "discarding priority boost for logical size calculation because timeline is not yet active"
    3708              :                                 );
    3709              :                             }
    3710              :                             TimelineState::Active => {
    3711              :                                 // activation should be setting the once cell
    3712            0 :                                 warn!(
    3713            0 :                                     "unexpected: cancel_wait_for_background_loop_concurrency_limit_semaphore not set, priority-boosting of logical size calculation will not work"
    3714              :                                 );
    3715            0 :                                 debug_assert!(false);
    3716              :                             }
    3717              :                         }
    3718              :                     }
    3719              :                 }
    3720              :             }
    3721              :         }
    3722              : 
    3723            0 :         if let CurrentLogicalSize::Approximate(_) = &current_size {
    3724            0 :             if ctx.task_kind() == TaskKind::WalReceiverConnectionHandler {
    3725            0 :                 let first = self
    3726            0 :                     .current_logical_size
    3727            0 :                     .did_return_approximate_to_walreceiver
    3728            0 :                     .compare_exchange(
    3729            0 :                         false,
    3730            0 :                         true,
    3731            0 :                         AtomicOrdering::Relaxed,
    3732            0 :                         AtomicOrdering::Relaxed,
    3733            0 :                     )
    3734            0 :                     .is_ok();
    3735            0 :                 if first {
    3736            0 :                     crate::metrics::initial_logical_size::TIMELINES_WHERE_WALRECEIVER_GOT_APPROXIMATE_SIZE.inc();
    3737            0 :                 }
    3738            0 :             }
    3739            0 :         }
    3740              : 
    3741            0 :         current_size
    3742            0 :     }
    3743              : 
    3744            0 :     fn spawn_initial_logical_size_computation_task(self: &Arc<Self>, ctx: &RequestContext) {
    3745            0 :         let Some(initial_part_end) = self.current_logical_size.initial_part_end else {
    3746              :             // nothing to do for freshly created timelines;
    3747            0 :             assert_eq!(
    3748            0 :                 self.current_logical_size.current_size().accuracy(),
    3749              :                 logical_size::Accuracy::Exact,
    3750              :             );
    3751            0 :             self.current_logical_size.initialized.add_permits(1);
    3752            0 :             return;
    3753              :         };
    3754              : 
    3755            0 :         let cancel_wait_for_background_loop_concurrency_limit_semaphore = CancellationToken::new();
    3756            0 :         let token = cancel_wait_for_background_loop_concurrency_limit_semaphore.clone();
    3757            0 :         self.current_logical_size
    3758            0 :             .cancel_wait_for_background_loop_concurrency_limit_semaphore.set(token)
    3759            0 :             .expect("initial logical size calculation task must be spawned exactly once per Timeline object");
    3760              : 
    3761            0 :         let self_clone = Arc::clone(self);
    3762            0 :         let background_ctx = ctx.detached_child(
    3763            0 :             TaskKind::InitialLogicalSizeCalculation,
    3764            0 :             DownloadBehavior::Download,
    3765              :         );
    3766            0 :         task_mgr::spawn(
    3767            0 :             task_mgr::BACKGROUND_RUNTIME.handle(),
    3768            0 :             task_mgr::TaskKind::InitialLogicalSizeCalculation,
    3769            0 :             self.tenant_shard_id,
    3770            0 :             Some(self.timeline_id),
    3771            0 :             "initial size calculation",
    3772              :             // NB: don't log errors here, task_mgr will do that.
    3773            0 :             async move {
    3774            0 :                 self_clone
    3775            0 :                     .initial_logical_size_calculation_task(
    3776            0 :                         initial_part_end,
    3777            0 :                         cancel_wait_for_background_loop_concurrency_limit_semaphore,
    3778            0 :                         background_ctx,
    3779            0 :                     )
    3780            0 :                     .await;
    3781            0 :                 Ok(())
    3782            0 :             }
    3783            0 :             .instrument(info_span!(parent: None, "initial_size_calculation", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id)),
    3784              :         );
    3785            0 :     }
    3786              : 
    3787              :     /// # Cancellation
    3788              :     ///
    3789              :     /// This method is sensitive to `Timeline::cancel`.
    3790              :     ///
    3791              :     /// It is _not_ sensitive to task_mgr::shutdown_token().
    3792              :     ///
    3793              :     /// # Cancel-Safety
    3794              :     ///
    3795              :     /// It does Timeline IO, hence this should be polled to completion because
    3796              :     /// we could be leaving in-flight IOs behind, which is safe, but annoying
    3797              :     /// to reason about.
    3798            0 :     async fn initial_logical_size_calculation_task(
    3799            0 :         self: Arc<Self>,
    3800            0 :         initial_part_end: Lsn,
    3801            0 :         skip_concurrency_limiter: CancellationToken,
    3802            0 :         background_ctx: RequestContext,
    3803            0 :     ) {
    3804            0 :         scopeguard::defer! {
    3805              :             // Irrespective of the outcome of this operation, we should unblock anyone waiting for it.
    3806              :             self.current_logical_size.initialized.add_permits(1);
    3807              :         }
    3808              : 
    3809            0 :         let try_once = |attempt: usize| {
    3810            0 :             let background_ctx = &background_ctx;
    3811            0 :             let self_ref = &self;
    3812            0 :             let skip_concurrency_limiter = &skip_concurrency_limiter;
    3813            0 :             async move {
    3814            0 :                 let wait_for_permit = super::tasks::acquire_concurrency_permit(
    3815            0 :                     BackgroundLoopKind::InitialLogicalSizeCalculation,
    3816            0 :                     background_ctx,
    3817              :                 );
    3818              : 
    3819              :                 use crate::metrics::initial_logical_size::StartCircumstances;
    3820            0 :                 let (_maybe_permit, circumstances) = tokio::select! {
    3821            0 :                     permit = wait_for_permit => {
    3822            0 :                         (Some(permit), StartCircumstances::AfterBackgroundTasksRateLimit)
    3823              :                     }
    3824            0 :                     _ = self_ref.cancel.cancelled() => {
    3825            0 :                         return Err(CalculateLogicalSizeError::Cancelled);
    3826              :                     }
    3827            0 :                     () = skip_concurrency_limiter.cancelled() => {
    3828              :                         // Some action that is part of a end user interaction requested logical size
    3829              :                         // => break out of the rate limit
    3830              :                         // TODO: ideally we'd not run on BackgroundRuntime but the requester's runtime;
    3831              :                         // but then again what happens if they cancel; also, we should just be using
    3832              :                         // one runtime across the entire process, so, let's leave this for now.
    3833            0 :                         (None, StartCircumstances::SkippedConcurrencyLimiter)
    3834              :                     }
    3835              :                 };
    3836              : 
    3837            0 :                 let metrics_guard = if attempt == 1 {
    3838            0 :                     crate::metrics::initial_logical_size::START_CALCULATION.first(circumstances)
    3839              :                 } else {
    3840            0 :                     crate::metrics::initial_logical_size::START_CALCULATION.retry(circumstances)
    3841              :                 };
    3842              : 
    3843            0 :                 let io_concurrency = IoConcurrency::spawn_from_conf(
    3844            0 :                     self_ref.conf.get_vectored_concurrent_io,
    3845            0 :                     self_ref
    3846            0 :                         .gate
    3847            0 :                         .enter()
    3848            0 :                         .map_err(|_| CalculateLogicalSizeError::Cancelled)?,
    3849              :                 );
    3850              : 
    3851            0 :                 let calculated_size = self_ref
    3852            0 :                     .logical_size_calculation_task(
    3853            0 :                         initial_part_end,
    3854            0 :                         LogicalSizeCalculationCause::Initial,
    3855            0 :                         background_ctx,
    3856            0 :                     )
    3857            0 :                     .await?;
    3858              : 
    3859            0 :                 self_ref
    3860            0 :                     .trigger_aux_file_size_computation(
    3861            0 :                         initial_part_end,
    3862            0 :                         background_ctx,
    3863            0 :                         io_concurrency,
    3864            0 :                     )
    3865            0 :                     .await?;
    3866              : 
    3867              :                 // TODO: add aux file size to logical size
    3868              : 
    3869            0 :                 Ok((calculated_size, metrics_guard))
    3870            0 :             }
    3871            0 :         };
    3872              : 
    3873            0 :         let retrying = async {
    3874            0 :             let mut attempt = 0;
    3875              :             loop {
    3876            0 :                 attempt += 1;
    3877              : 
    3878            0 :                 match try_once(attempt).await {
    3879            0 :                     Ok(res) => return ControlFlow::Continue(res),
    3880            0 :                     Err(CalculateLogicalSizeError::Cancelled) => return ControlFlow::Break(()),
    3881              :                     Err(
    3882            0 :                         e @ (CalculateLogicalSizeError::Decode(_)
    3883              :                         | CalculateLogicalSizeError::PageRead(_)),
    3884              :                     ) => {
    3885            0 :                         warn!(attempt, "initial size calculation failed: {e:?}");
    3886              :                         // exponential back-off doesn't make sense at these long intervals;
    3887              :                         // use fixed retry interval with generous jitter instead
    3888            0 :                         let sleep_duration = Duration::from_secs(
    3889            0 :                             u64::try_from(
    3890              :                                 // 1hour base
    3891            0 :                                 (60_i64 * 60_i64)
    3892            0 :                                     // 10min jitter
    3893            0 :                                     + rand::thread_rng().gen_range(-10 * 60..10 * 60),
    3894              :                             )
    3895            0 :                             .expect("10min < 1hour"),
    3896              :                         );
    3897            0 :                         tokio::select! {
    3898            0 :                             _ = tokio::time::sleep(sleep_duration) => {}
    3899            0 :                             _ = self.cancel.cancelled() => return ControlFlow::Break(()),
    3900              :                         }
    3901              :                     }
    3902              :                 }
    3903              :             }
    3904            0 :         };
    3905              : 
    3906            0 :         let (calculated_size, metrics_guard) = match retrying.await {
    3907            0 :             ControlFlow::Continue(calculated_size) => calculated_size,
    3908            0 :             ControlFlow::Break(()) => return,
    3909              :         };
    3910              : 
    3911              :         // we cannot query current_logical_size.current_size() to know the current
    3912              :         // *negative* value, only truncated to u64.
    3913            0 :         let added = self
    3914            0 :             .current_logical_size
    3915            0 :             .size_added_after_initial
    3916            0 :             .load(AtomicOrdering::Relaxed);
    3917              : 
    3918            0 :         let sum = calculated_size.saturating_add_signed(added);
    3919              : 
    3920              :         // set the gauge value before it can be set in `update_current_logical_size`.
    3921            0 :         self.metrics.current_logical_size_gauge.set(sum);
    3922              : 
    3923            0 :         self.current_logical_size
    3924            0 :             .initial_logical_size
    3925            0 :             .set((calculated_size, metrics_guard.calculation_result_saved()))
    3926            0 :             .ok()
    3927            0 :             .expect("only this task sets it");
    3928            0 :     }
    3929              : 
    3930            7 :     pub(crate) fn spawn_ondemand_logical_size_calculation(
    3931            7 :         self: &Arc<Self>,
    3932            7 :         lsn: Lsn,
    3933            7 :         cause: LogicalSizeCalculationCause,
    3934            7 :         ctx: RequestContext,
    3935            7 :     ) -> oneshot::Receiver<Result<u64, CalculateLogicalSizeError>> {
    3936            7 :         let (sender, receiver) = oneshot::channel();
    3937            7 :         let self_clone = Arc::clone(self);
    3938              :         // XXX if our caller loses interest, i.e., ctx is cancelled,
    3939              :         // we should stop the size calculation work and return an error.
    3940              :         // That would require restructuring this function's API to
    3941              :         // return the result directly, instead of a Receiver for the result.
    3942            7 :         let ctx = ctx.detached_child(
    3943            7 :             TaskKind::OndemandLogicalSizeCalculation,
    3944            7 :             DownloadBehavior::Download,
    3945              :         );
    3946            7 :         task_mgr::spawn(
    3947            7 :             task_mgr::BACKGROUND_RUNTIME.handle(),
    3948            7 :             task_mgr::TaskKind::OndemandLogicalSizeCalculation,
    3949            7 :             self.tenant_shard_id,
    3950            7 :             Some(self.timeline_id),
    3951            7 :             "ondemand logical size calculation",
    3952            7 :             async move {
    3953            7 :                 let res = self_clone
    3954            7 :                     .logical_size_calculation_task(lsn, cause, &ctx)
    3955            7 :                     .await;
    3956            7 :                 let _ = sender.send(res).ok();
    3957            7 :                 Ok(()) // Receiver is responsible for handling errors
    3958            7 :             }
    3959            7 :             .in_current_span(),
    3960              :         );
    3961            7 :         receiver
    3962            7 :     }
    3963              : 
    3964              :     #[instrument(skip_all)]
    3965              :     async fn logical_size_calculation_task(
    3966              :         self: &Arc<Self>,
    3967              :         lsn: Lsn,
    3968              :         cause: LogicalSizeCalculationCause,
    3969              :         ctx: &RequestContext,
    3970              :     ) -> Result<u64, CalculateLogicalSizeError> {
    3971              :         crate::span::debug_assert_current_span_has_tenant_and_timeline_id();
    3972              :         // We should never be calculating logical sizes on shard !=0, because these shards do not have
    3973              :         // accurate relation sizes, and they do not emit consumption metrics.
    3974              :         debug_assert!(self.tenant_shard_id.is_shard_zero());
    3975              : 
    3976              :         let guard = self
    3977              :             .gate
    3978              :             .enter()
    3979              :             .map_err(|_| CalculateLogicalSizeError::Cancelled)?;
    3980              : 
    3981              :         self.calculate_logical_size(lsn, cause, &guard, ctx).await
    3982              :     }
    3983              : 
    3984              :     /// Calculate the logical size of the database at the latest LSN.
    3985              :     ///
    3986              :     /// NOTE: counted incrementally, includes ancestors. This can be a slow operation,
    3987              :     /// especially if we need to download remote layers.
    3988            7 :     async fn calculate_logical_size(
    3989            7 :         &self,
    3990            7 :         up_to_lsn: Lsn,
    3991            7 :         cause: LogicalSizeCalculationCause,
    3992            7 :         _guard: &GateGuard,
    3993            7 :         ctx: &RequestContext,
    3994            7 :     ) -> Result<u64, CalculateLogicalSizeError> {
    3995            7 :         info!(
    3996            0 :             "Calculating logical size for timeline {} at {}",
    3997              :             self.timeline_id, up_to_lsn
    3998              :         );
    3999              : 
    4000            7 :         if let Err(()) = pausable_failpoint!("timeline-calculate-logical-size-pause", &self.cancel)
    4001              :         {
    4002            0 :             return Err(CalculateLogicalSizeError::Cancelled);
    4003            7 :         }
    4004              : 
    4005              :         // See if we've already done the work for initial size calculation.
    4006              :         // This is a short-cut for timelines that are mostly unused.
    4007            7 :         if let Some(size) = self.current_logical_size.initialized_size(up_to_lsn) {
    4008            0 :             return Ok(size);
    4009            7 :         }
    4010            7 :         let storage_time_metrics = match cause {
    4011              :             LogicalSizeCalculationCause::Initial
    4012              :             | LogicalSizeCalculationCause::ConsumptionMetricsSyntheticSize
    4013            0 :             | LogicalSizeCalculationCause::TenantSizeHandler => &self.metrics.logical_size_histo,
    4014              :             LogicalSizeCalculationCause::EvictionTaskImitation => {
    4015            7 :                 &self.metrics.imitate_logical_size_histo
    4016              :             }
    4017              :         };
    4018            7 :         let timer = storage_time_metrics.start_timer();
    4019            7 :         let logical_size = self
    4020            7 :             .get_current_logical_size_non_incremental(up_to_lsn, ctx)
    4021            7 :             .await?;
    4022            7 :         debug!("calculated logical size: {logical_size}");
    4023            7 :         timer.stop_and_record();
    4024            7 :         Ok(logical_size)
    4025            7 :     }
    4026              : 
    4027              :     /// Update current logical size, adding `delta' to the old value.
    4028       135285 :     fn update_current_logical_size(&self, delta: i64) {
    4029       135285 :         let logical_size = &self.current_logical_size;
    4030       135285 :         logical_size.increment_size(delta);
    4031              : 
    4032              :         // Also set the value in the prometheus gauge. Note that
    4033              :         // there is a race condition here: if this is is called by two
    4034              :         // threads concurrently, the prometheus gauge might be set to
    4035              :         // one value while current_logical_size is set to the
    4036              :         // other.
    4037       135285 :         match logical_size.current_size() {
    4038       135285 :             CurrentLogicalSize::Exact(ref new_current_size) => self
    4039       135285 :                 .metrics
    4040       135285 :                 .current_logical_size_gauge
    4041       135285 :                 .set(new_current_size.into()),
    4042            0 :             CurrentLogicalSize::Approximate(_) => {
    4043            0 :                 // don't update the gauge yet, this allows us not to update the gauge back and
    4044            0 :                 // forth between the initial size calculation task.
    4045            0 :             }
    4046              :         }
    4047       135285 :     }
    4048              : 
    4049         1527 :     pub(crate) fn update_directory_entries_count(&self, kind: DirectoryKind, count: MetricsUpdate) {
    4050              :         // TODO: this directory metrics is not correct -- we could have multiple reldirs in the system
    4051              :         // for each of the database, but we only store one value, and therefore each pgdirmodification
    4052              :         // would overwrite the previous value if they modify different databases.
    4053              : 
    4054         1527 :         match count {
    4055          566 :             MetricsUpdate::Set(count) => {
    4056          566 :                 self.directory_metrics[kind.offset()].store(count, AtomicOrdering::Relaxed);
    4057          566 :                 self.directory_metrics_inited[kind.offset()].store(true, AtomicOrdering::Relaxed);
    4058          566 :             }
    4059          960 :             MetricsUpdate::Add(count) => {
    4060              :                 // TODO: these operations are not atomic; but we only have one writer to the metrics, so
    4061              :                 // it's fine.
    4062          960 :                 if self.directory_metrics_inited[kind.offset()].load(AtomicOrdering::Relaxed) {
    4063          960 :                     // The metrics has been initialized with `MetricsUpdate::Set` before, so we can add/sub
    4064          960 :                     // the value reliably.
    4065          960 :                     self.directory_metrics[kind.offset()].fetch_add(count, AtomicOrdering::Relaxed);
    4066          960 :                 }
    4067              :                 // Otherwise, ignore this update
    4068              :             }
    4069            1 :             MetricsUpdate::Sub(count) => {
    4070              :                 // TODO: these operations are not atomic; but we only have one writer to the metrics, so
    4071              :                 // it's fine.
    4072            1 :                 if self.directory_metrics_inited[kind.offset()].load(AtomicOrdering::Relaxed) {
    4073            1 :                     // The metrics has been initialized with `MetricsUpdate::Set` before.
    4074            1 :                     // The operation could overflow so we need to normalize the value.
    4075            1 :                     let prev_val =
    4076            1 :                         self.directory_metrics[kind.offset()].load(AtomicOrdering::Relaxed);
    4077            1 :                     let res = prev_val.saturating_sub(count);
    4078            1 :                     self.directory_metrics[kind.offset()].store(res, AtomicOrdering::Relaxed);
    4079            1 :                 }
    4080              :                 // Otherwise, ignore this update
    4081              :             }
    4082              :         };
    4083              : 
    4084              :         // TODO: remove this, there's no place in the code that updates this aux metrics.
    4085         1527 :         let aux_metric =
    4086         1527 :             self.directory_metrics[DirectoryKind::AuxFiles.offset()].load(AtomicOrdering::Relaxed);
    4087              : 
    4088         1527 :         let sum_of_entries = self
    4089         1527 :             .directory_metrics
    4090         1527 :             .iter()
    4091        12216 :             .map(|v| v.load(AtomicOrdering::Relaxed))
    4092         1527 :             .sum();
    4093              :         // Set a high general threshold and a lower threshold for the auxiliary files,
    4094              :         // as we can have large numbers of relations in the db directory.
    4095              :         const SUM_THRESHOLD: u64 = 5000;
    4096              :         const AUX_THRESHOLD: u64 = 1000;
    4097         1527 :         if sum_of_entries >= SUM_THRESHOLD || aux_metric >= AUX_THRESHOLD {
    4098            0 :             self.metrics
    4099            0 :                 .directory_entries_count_gauge
    4100            0 :                 .set(sum_of_entries);
    4101         1527 :         } else if let Some(metric) = Lazy::get(&self.metrics.directory_entries_count_gauge) {
    4102            0 :             metric.set(sum_of_entries);
    4103         1527 :         }
    4104         1527 :     }
    4105              : 
    4106            0 :     async fn find_layer(
    4107            0 :         &self,
    4108            0 :         layer_name: &LayerName,
    4109            0 :     ) -> Result<Option<Layer>, layer_manager::Shutdown> {
    4110            0 :         let guard = self
    4111            0 :             .layers
    4112            0 :             .read(LayerManagerLockHolder::GetLayerMapInfo)
    4113            0 :             .await;
    4114            0 :         let layer = guard
    4115            0 :             .layer_map()?
    4116            0 :             .iter_historic_layers()
    4117            0 :             .find(|l| &l.layer_name() == layer_name)
    4118            0 :             .map(|found| guard.get_from_desc(&found));
    4119            0 :         Ok(layer)
    4120            0 :     }
    4121              : 
    4122            0 :     pub(super) fn should_keep_previous_heatmap(&self, new_heatmap_end_lsn: Lsn) -> bool {
    4123            0 :         let crnt = self.previous_heatmap.load();
    4124            0 :         match crnt.as_deref() {
    4125            0 :             Some(PreviousHeatmap::Active { end_lsn, .. }) => match end_lsn {
    4126            0 :                 Some(crnt_end_lsn) => *crnt_end_lsn > new_heatmap_end_lsn,
    4127            0 :                 None => true,
    4128              :             },
    4129            0 :             Some(PreviousHeatmap::Obsolete) => false,
    4130            0 :             None => false,
    4131              :         }
    4132            0 :     }
    4133              : 
    4134              :     /// The timeline heatmap is a hint to secondary locations from the primary location,
    4135              :     /// indicating which layers are currently on-disk on the primary.
    4136              :     ///
    4137              :     /// None is returned if the Timeline is in a state where uploading a heatmap
    4138              :     /// doesn't make sense, such as shutting down or initializing.  The caller
    4139              :     /// should treat this as a cue to simply skip doing any heatmap uploading
    4140              :     /// for this timeline.
    4141            8 :     pub(crate) async fn generate_heatmap(&self) -> Option<HeatMapTimeline> {
    4142            8 :         if !self.is_active() {
    4143            0 :             return None;
    4144            8 :         }
    4145              : 
    4146            8 :         let guard = self
    4147            8 :             .layers
    4148            8 :             .read(LayerManagerLockHolder::GenerateHeatmap)
    4149            8 :             .await;
    4150              : 
    4151              :         // Firstly, if there's any heatmap left over from when this location
    4152              :         // was a secondary, take that into account. Keep layers that are:
    4153              :         // * present in the layer map
    4154              :         // * visible
    4155              :         // * non-resident
    4156              :         // * not evicted since we read the heatmap
    4157              :         //
    4158              :         // Without this, a new cold, attached location would clobber the previous
    4159              :         // heatamp.
    4160            8 :         let previous_heatmap = self.previous_heatmap.load();
    4161            8 :         let visible_non_resident = match previous_heatmap.as_deref() {
    4162              :             Some(PreviousHeatmap::Active {
    4163            6 :                 heatmap, read_at, ..
    4164           23 :             }) => Some(heatmap.all_layers().filter_map(|hl| {
    4165           23 :                 let desc: PersistentLayerDesc = hl.name.clone().into();
    4166           23 :                 let layer = guard.try_get_from_key(&desc.key())?;
    4167              : 
    4168           23 :                 if layer.visibility() == LayerVisibilityHint::Covered {
    4169            0 :                     return None;
    4170           23 :                 }
    4171              : 
    4172           23 :                 if layer.is_likely_resident() {
    4173           10 :                     return None;
    4174           13 :                 }
    4175              : 
    4176           13 :                 if layer.last_evicted_at().happened_after(*read_at) {
    4177            3 :                     return None;
    4178           10 :                 }
    4179              : 
    4180           10 :                 Some((desc, hl.metadata.clone(), hl.access_time, hl.cold))
    4181           23 :             })),
    4182            0 :             Some(PreviousHeatmap::Obsolete) => None,
    4183            2 :             None => None,
    4184              :         };
    4185              : 
    4186              :         // Secondly, all currently visible, resident layers are included.
    4187           18 :         let resident = guard.likely_resident_layers().filter_map(|layer| {
    4188           18 :             match layer.visibility() {
    4189              :                 LayerVisibilityHint::Visible => {
    4190              :                     // Layer is visible to one or more read LSNs: elegible for inclusion in layer map
    4191           17 :                     let last_activity_ts = layer.latest_activity();
    4192           17 :                     Some((
    4193           17 :                         layer.layer_desc().clone(),
    4194           17 :                         layer.metadata(),
    4195           17 :                         last_activity_ts,
    4196           17 :                         false, // these layers are not cold
    4197           17 :                     ))
    4198              :                 }
    4199              :                 LayerVisibilityHint::Covered => {
    4200              :                     // Layer is resident but unlikely to be read: not elegible for inclusion in heatmap.
    4201            1 :                     None
    4202              :                 }
    4203              :             }
    4204           18 :         });
    4205              : 
    4206            8 :         let mut layers = match visible_non_resident {
    4207            6 :             Some(non_resident) => {
    4208            6 :                 let mut non_resident = non_resident.peekable();
    4209            6 :                 if non_resident.peek().is_none() {
    4210            2 :                     tracing::info!(timeline_id=%self.timeline_id, "Previous heatmap now obsolete");
    4211            2 :                     self.previous_heatmap
    4212            2 :                         .store(Some(PreviousHeatmap::Obsolete.into()));
    4213            4 :                 }
    4214              : 
    4215            6 :                 non_resident.chain(resident).collect::<Vec<_>>()
    4216              :             }
    4217            2 :             None => resident.collect::<Vec<_>>(),
    4218              :         };
    4219              : 
    4220              :         // Sort layers in order of which to download first.  For a large set of layers to download, we
    4221              :         // want to prioritize those layers which are most likely to still be in the resident many minutes
    4222              :         // or hours later:
    4223              :         // - Cold layers go last for convenience when a human inspects the heatmap.
    4224              :         // - Download L0s last, because they churn the fastest: L0s on a fast-writing tenant might
    4225              :         //   only exist for a few minutes before being compacted into L1s.
    4226              :         // - For L1 & image layers, download most recent LSNs first: the older the LSN, the sooner
    4227              :         //   the layer is likely to be covered by an image layer during compaction.
    4228           60 :         layers.sort_by_key(|(desc, _meta, _atime, cold)| {
    4229           60 :             std::cmp::Reverse((
    4230           60 :                 *cold,
    4231           60 :                 !LayerMap::is_l0(&desc.key_range, desc.is_delta),
    4232           60 :                 desc.lsn_range.end,
    4233           60 :             ))
    4234           60 :         });
    4235              : 
    4236            8 :         let layers = layers
    4237            8 :             .into_iter()
    4238           27 :             .map(|(desc, meta, atime, cold)| {
    4239           27 :                 HeatMapLayer::new(desc.layer_name(), meta, atime, cold)
    4240           27 :             })
    4241            8 :             .collect();
    4242              : 
    4243            8 :         Some(HeatMapTimeline::new(self.timeline_id, layers))
    4244            8 :     }
    4245              : 
    4246            0 :     pub(super) async fn generate_unarchival_heatmap(&self, end_lsn: Lsn) -> PreviousHeatmap {
    4247            0 :         let guard = self
    4248            0 :             .layers
    4249            0 :             .read(LayerManagerLockHolder::GenerateHeatmap)
    4250            0 :             .await;
    4251              : 
    4252            0 :         let now = SystemTime::now();
    4253            0 :         let mut heatmap_layers = Vec::default();
    4254            0 :         for vl in guard.visible_layers() {
    4255            0 :             if vl.layer_desc().get_lsn_range().start >= end_lsn {
    4256            0 :                 continue;
    4257            0 :             }
    4258              : 
    4259            0 :             let hl = HeatMapLayer {
    4260            0 :                 name: vl.layer_desc().layer_name(),
    4261            0 :                 metadata: vl.metadata(),
    4262            0 :                 access_time: now,
    4263            0 :                 cold: true,
    4264            0 :             };
    4265            0 :             heatmap_layers.push(hl);
    4266              :         }
    4267              : 
    4268            0 :         tracing::info!(
    4269            0 :             "Generating unarchival heatmap with {} layers",
    4270            0 :             heatmap_layers.len()
    4271              :         );
    4272              : 
    4273            0 :         let heatmap = HeatMapTimeline::new(self.timeline_id, heatmap_layers);
    4274            0 :         PreviousHeatmap::Active {
    4275            0 :             heatmap,
    4276            0 :             read_at: Instant::now(),
    4277            0 :             end_lsn: Some(end_lsn),
    4278            0 :         }
    4279            0 :     }
    4280              : 
    4281              :     /// Returns true if the given lsn is or was an ancestor branchpoint.
    4282            0 :     pub(crate) fn is_ancestor_lsn(&self, lsn: Lsn) -> bool {
    4283              :         // upon timeline detach, we set the ancestor_lsn to Lsn::INVALID and the store the original
    4284              :         // branchpoint in the value in IndexPart::lineage
    4285            0 :         self.ancestor_lsn == lsn
    4286            0 :             || (self.ancestor_lsn == Lsn::INVALID
    4287            0 :                 && self.remote_client.is_previous_ancestor_lsn(lsn))
    4288            0 :     }
    4289              : }
    4290              : 
    4291              : #[derive(Clone)]
    4292              : /// Type representing a query in the ([`Lsn`], [`Key`]) space.
    4293              : /// In other words, a set of segments in a 2D space.
    4294              : ///
    4295              : /// This representation has the advatange of avoiding hash map
    4296              : /// allocations for uniform queries.
    4297              : pub(crate) enum VersionedKeySpaceQuery {
    4298              :     /// Variant for queries at a single [`Lsn`]
    4299              :     Uniform { keyspace: KeySpace, lsn: Lsn },
    4300              :     /// Variant for queries at multiple [`Lsn`]s
    4301              :     Scattered {
    4302              :         keyspaces_at_lsn: Vec<(Lsn, KeySpace)>,
    4303              :     },
    4304              : }
    4305              : 
    4306              : impl VersionedKeySpaceQuery {
    4307       302222 :     pub(crate) fn uniform(keyspace: KeySpace, lsn: Lsn) -> Self {
    4308       302222 :         Self::Uniform { keyspace, lsn }
    4309       302222 :     }
    4310              : 
    4311        10192 :     pub(crate) fn scattered(keyspaces_at_lsn: Vec<(Lsn, KeySpace)>) -> Self {
    4312        10192 :         Self::Scattered { keyspaces_at_lsn }
    4313        10192 :     }
    4314              : 
    4315              :     /// Returns the most recent (largest) LSN included in the query.
    4316              :     /// If any of the LSNs included in the query are invalid, returns
    4317              :     /// an error instead.
    4318       624828 :     fn high_watermark_lsn(&self) -> Result<Lsn, GetVectoredError> {
    4319       624828 :         match self {
    4320       604444 :             Self::Uniform { lsn, .. } => {
    4321       604444 :                 if !lsn.is_valid() {
    4322            0 :                     return Err(GetVectoredError::InvalidLsn(*lsn));
    4323       604444 :                 }
    4324              : 
    4325       604444 :                 Ok(*lsn)
    4326              :             }
    4327        20384 :             Self::Scattered { keyspaces_at_lsn } => {
    4328        20384 :                 let mut max_lsn = None;
    4329        42218 :                 for (lsn, _keyspace) in keyspaces_at_lsn.iter() {
    4330        42218 :                     if !lsn.is_valid() {
    4331            0 :                         return Err(GetVectoredError::InvalidLsn(*lsn));
    4332        42218 :                     }
    4333        42218 :                     max_lsn = std::cmp::max(max_lsn, Some(lsn));
    4334              :                 }
    4335              : 
    4336        20384 :                 if let Some(computed) = max_lsn {
    4337        20384 :                     Ok(*computed)
    4338              :                 } else {
    4339            0 :                     Err(GetVectoredError::Other(anyhow!("empty input")))
    4340              :                 }
    4341              :             }
    4342              :         }
    4343       624828 :     }
    4344              : 
    4345              :     /// Returns the total keyspace being queried: the result of projecting
    4346              :     /// everything in the key dimensions onto the key axis.
    4347       323491 :     fn total_keyspace(&self) -> KeySpace {
    4348       323491 :         match self {
    4349       303107 :             Self::Uniform { keyspace, .. } => keyspace.clone(),
    4350        20384 :             Self::Scattered { keyspaces_at_lsn } => keyspaces_at_lsn
    4351        20384 :                 .iter()
    4352        20384 :                 .map(|(_lsn, keyspace)| keyspace)
    4353        42218 :                 .fold(KeySpace::default(), |mut acc, v| {
    4354        42218 :                     acc.merge(v);
    4355        42218 :                     acc
    4356        42218 :                 }),
    4357              :         }
    4358       323491 :     }
    4359              : 
    4360              :     /// Returns LSN for a specific key.
    4361              :     ///
    4362              :     /// Invariant: requested key must be part of [`Self::total_keyspace`]
    4363       395535 :     pub(super) fn map_key_to_lsn(&self, key: &Key) -> Lsn {
    4364       395535 :         match self {
    4365       322343 :             Self::Uniform { lsn, .. } => *lsn,
    4366        73192 :             Self::Scattered { keyspaces_at_lsn } => {
    4367        73192 :                 keyspaces_at_lsn
    4368        73192 :                     .iter()
    4369       418280 :                     .find(|(_lsn, keyspace)| keyspace.contains(key))
    4370        73192 :                     .expect("Returned key was requested")
    4371              :                     .0
    4372              :             }
    4373              :         }
    4374       395535 :     }
    4375              : 
    4376              :     /// Remove any parts of the query (segments) which overlap with the provided
    4377              :     /// key space (also segments).
    4378       963769 :     fn remove_overlapping_with(&mut self, to_remove: &KeySpace) -> KeySpace {
    4379       963769 :         match self {
    4380       943385 :             Self::Uniform { keyspace, .. } => keyspace.remove_overlapping_with(to_remove),
    4381        20384 :             Self::Scattered { keyspaces_at_lsn } => {
    4382        20384 :                 let mut removed_accum = KeySpaceRandomAccum::new();
    4383        42218 :                 keyspaces_at_lsn.iter_mut().for_each(|(_lsn, keyspace)| {
    4384        42218 :                     let removed = keyspace.remove_overlapping_with(to_remove);
    4385        42218 :                     removed_accum.add_keyspace(removed);
    4386        42218 :                 });
    4387              : 
    4388        20384 :                 removed_accum.to_keyspace()
    4389              :             }
    4390              :         }
    4391       963769 :     }
    4392              : 
    4393       737808 :     fn is_empty(&self) -> bool {
    4394       737808 :         match self {
    4395       717424 :             Self::Uniform { keyspace, .. } => keyspace.is_empty(),
    4396        20384 :             Self::Scattered { keyspaces_at_lsn } => keyspaces_at_lsn
    4397        20384 :                 .iter()
    4398        31301 :                 .all(|(_lsn, keyspace)| keyspace.is_empty()),
    4399              :         }
    4400       737808 :     }
    4401              : 
    4402              :     /// "Lower" the query on the LSN dimension
    4403       112981 :     fn lower(&mut self, to: Lsn) {
    4404       112981 :         match self {
    4405       112981 :             Self::Uniform { lsn, .. } => {
    4406       112981 :                 // If the originally requested LSN is smaller than the starting
    4407       112981 :                 // LSN of the ancestor we are descending into, we need to respect that.
    4408       112981 :                 // Hence the min.
    4409       112981 :                 *lsn = std::cmp::min(*lsn, to);
    4410       112981 :             }
    4411            0 :             Self::Scattered { keyspaces_at_lsn } => {
    4412            0 :                 keyspaces_at_lsn.iter_mut().for_each(|(lsn, _keyspace)| {
    4413            0 :                     *lsn = std::cmp::min(*lsn, to);
    4414            0 :                 });
    4415              :             }
    4416              :         }
    4417       112981 :     }
    4418              : }
    4419              : 
    4420              : impl std::fmt::Display for VersionedKeySpaceQuery {
    4421            0 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    4422            0 :         write!(f, "[")?;
    4423              : 
    4424            0 :         match self {
    4425            0 :             VersionedKeySpaceQuery::Uniform { keyspace, lsn } => {
    4426            0 :                 write!(f, "{keyspace} @ {lsn}")?;
    4427              :             }
    4428            0 :             VersionedKeySpaceQuery::Scattered { keyspaces_at_lsn } => {
    4429            0 :                 for (lsn, keyspace) in keyspaces_at_lsn.iter() {
    4430            0 :                     write!(f, "{keyspace} @ {lsn},")?;
    4431              :                 }
    4432              :             }
    4433              :         }
    4434              : 
    4435            0 :         write!(f, "]")
    4436            0 :     }
    4437              : }
    4438              : 
    4439              : impl Timeline {
    4440              :     #[allow(clippy::doc_lazy_continuation)]
    4441              :     /// Get the data needed to reconstruct all keys in the provided keyspace
    4442              :     ///
    4443              :     /// The algorithm is as follows:
    4444              :     /// 1.   While some keys are still not done and there's a timeline to visit:
    4445              :     /// 2.   Visit the timeline (see [`Timeline::get_vectored_reconstruct_data_timeline`]:
    4446              :     /// 2.1: Build the fringe for the current keyspace
    4447              :     /// 2.2  Visit the newest layer from the fringe to collect all values for the range it
    4448              :     ///      intersects
    4449              :     /// 2.3. Pop the timeline from the fringe
    4450              :     /// 2.4. If the fringe is empty, go back to 1
    4451       312414 :     async fn get_vectored_reconstruct_data(
    4452       312414 :         &self,
    4453       312414 :         mut query: VersionedKeySpaceQuery,
    4454       312414 :         reconstruct_state: &mut ValuesReconstructState,
    4455       312414 :         ctx: &RequestContext,
    4456       312414 :     ) -> Result<(), GetVectoredError> {
    4457       312414 :         let original_hwm_lsn = query.high_watermark_lsn().unwrap();
    4458              : 
    4459              :         let mut timeline_owned: Arc<Timeline>;
    4460       312414 :         let mut timeline = self;
    4461              : 
    4462       312413 :         let missing_keyspace = loop {
    4463       425394 :             if self.cancel.is_cancelled() {
    4464            0 :                 return Err(GetVectoredError::Cancelled);
    4465       425394 :             }
    4466              : 
    4467              :             let TimelineVisitOutcome {
    4468       425394 :                 completed_keyspace: completed,
    4469       425394 :                 image_covered_keyspace,
    4470              :             } = {
    4471       425394 :                 let ctx = RequestContextBuilder::from(ctx)
    4472       425394 :                     .perf_span(|crnt_perf_span| {
    4473            0 :                         info_span!(
    4474              :                             target: PERF_TRACE_TARGET,
    4475            0 :                             parent: crnt_perf_span,
    4476              :                             "PLAN_IO_TIMELINE",
    4477              :                             timeline = %timeline.timeline_id,
    4478            0 :                             high_watermark_lsn = %query.high_watermark_lsn().unwrap(),
    4479              :                         )
    4480            0 :                     })
    4481       425394 :                     .attached_child();
    4482              : 
    4483       425394 :                 Self::get_vectored_reconstruct_data_timeline(
    4484       425394 :                     timeline,
    4485       425394 :                     &query,
    4486       425394 :                     reconstruct_state,
    4487       425394 :                     &self.cancel,
    4488       425394 :                     &ctx,
    4489              :                 )
    4490       425394 :                 .maybe_perf_instrument(&ctx, |crnt_perf_span| crnt_perf_span.clone())
    4491       425394 :                 .await?
    4492              :             };
    4493              : 
    4494       425394 :             query.remove_overlapping_with(&completed);
    4495              : 
    4496              :             // Do not descend into the ancestor timeline for aux files.
    4497              :             // We don't return a blanket [`GetVectoredError::MissingKey`] to avoid
    4498              :             // stalling compaction.
    4499       425394 :             query.remove_overlapping_with(&KeySpace {
    4500       425394 :                 ranges: vec![NON_INHERITED_RANGE, Key::sparse_non_inherited_keyspace()],
    4501       425394 :             });
    4502              : 
    4503              :             // Keyspace is fully retrieved
    4504       425394 :             if query.is_empty() {
    4505       312230 :                 break None;
    4506       113164 :             }
    4507              : 
    4508       113164 :             let Some(ancestor_timeline) = timeline.ancestor_timeline.as_ref() else {
    4509              :                 // Not fully retrieved but no ancestor timeline.
    4510          183 :                 break Some(query.total_keyspace());
    4511              :             };
    4512              : 
    4513              :             // Now we see if there are keys covered by the image layer but does not exist in the
    4514              :             // image layer, which means that the key does not exist.
    4515              : 
    4516              :             // The block below will stop the vectored search if any of the keys encountered an image layer
    4517              :             // which did not contain a snapshot for said key. Since we have already removed all completed
    4518              :             // keys from `keyspace`, we expect there to be no overlap between it and the image covered key
    4519              :             // space. If that's not the case, we had at least one key encounter a gap in the image layer
    4520              :             // and stop the search as a result of that.
    4521       112981 :             let mut removed = query.remove_overlapping_with(&image_covered_keyspace);
    4522              :             // Do not fire missing key error and end early for sparse keys. Note that we hava already removed
    4523              :             // non-inherited keyspaces before, so we can safely do a full `SPARSE_RANGE` remove instead of
    4524              :             // figuring out what is the inherited key range and do a fine-grained pruning.
    4525       112981 :             removed.remove_overlapping_with(&KeySpace {
    4526       112981 :                 ranges: vec![SPARSE_RANGE],
    4527       112981 :             });
    4528       112981 :             if !removed.is_empty() {
    4529            0 :                 break Some(removed);
    4530       112981 :             }
    4531              : 
    4532              :             // Each key range in the original query is at some point in the LSN space.
    4533              :             // When descending into the ancestor, lower all ranges in the LSN space
    4534              :             // such that new changes on the parent timeline are not visible.
    4535       112981 :             query.lower(timeline.ancestor_lsn);
    4536              : 
    4537       112981 :             let ctx = RequestContextBuilder::from(ctx)
    4538       112981 :                 .perf_span(|crnt_perf_span| {
    4539            0 :                     info_span!(
    4540              :                         target: PERF_TRACE_TARGET,
    4541            0 :                         parent: crnt_perf_span,
    4542              :                         "GET_ANCESTOR",
    4543              :                         timeline = %timeline.timeline_id,
    4544            0 :                         ancestor = %ancestor_timeline.timeline_id,
    4545              :                         ancestor_lsn = %timeline.ancestor_lsn
    4546              :                     )
    4547            0 :                 })
    4548       112981 :                 .attached_child();
    4549              : 
    4550       112981 :             timeline_owned = timeline
    4551       112981 :                 .get_ready_ancestor_timeline(ancestor_timeline, &ctx)
    4552       112981 :                 .maybe_perf_instrument(&ctx, |crnt_perf_span| crnt_perf_span.clone())
    4553       112981 :                 .await?;
    4554       112980 :             timeline = &*timeline_owned;
    4555              :         };
    4556              : 
    4557              :         // Remove sparse keys from the keyspace so that it doesn't fire errors.
    4558       312413 :         let missing_keyspace = if let Some(missing_keyspace) = missing_keyspace {
    4559          183 :             let mut missing_keyspace = missing_keyspace;
    4560          183 :             missing_keyspace.remove_overlapping_with(&KeySpace {
    4561          183 :                 ranges: vec![SPARSE_RANGE],
    4562          183 :             });
    4563          183 :             if missing_keyspace.is_empty() {
    4564          176 :                 None
    4565              :             } else {
    4566            7 :                 Some(missing_keyspace)
    4567              :             }
    4568              :         } else {
    4569       312230 :             None
    4570              :         };
    4571              : 
    4572       312413 :         if let Some(missing_keyspace) = missing_keyspace {
    4573            7 :             return Err(GetVectoredError::MissingKey(Box::new(MissingKeyError {
    4574            7 :                 keyspace: missing_keyspace, /* better if we can store the full keyspace */
    4575            7 :                 shard: self.shard_identity.number,
    4576            7 :                 original_hwm_lsn,
    4577            7 :                 ancestor_lsn: Some(timeline.ancestor_lsn),
    4578            7 :                 backtrace: None,
    4579            7 :                 read_path: std::mem::take(&mut reconstruct_state.read_path),
    4580            7 :                 query: None,
    4581            7 :             })));
    4582       312406 :         }
    4583              : 
    4584       312406 :         Ok(())
    4585       312414 :     }
    4586              : 
    4587       425394 :     async fn get_vectored_init_fringe(
    4588       425394 :         &self,
    4589       425394 :         query: &VersionedKeySpaceQuery,
    4590       425394 :     ) -> Result<LayerFringe, GetVectoredError> {
    4591       425394 :         let mut fringe = LayerFringe::new();
    4592       425394 :         let guard = self.layers.read(LayerManagerLockHolder::GetPage).await;
    4593              : 
    4594       425394 :         match query {
    4595       415202 :             VersionedKeySpaceQuery::Uniform { keyspace, lsn } => {
    4596              :                 // LSNs requested by the compute or determined by the pageserver
    4597              :                 // are inclusive. Queries to the layer map use exclusive LSNs.
    4598              :                 // Hence, bump the value before the query - same in the other
    4599              :                 // match arm.
    4600       415202 :                 let cont_lsn = Lsn(lsn.0 + 1);
    4601       415202 :                 guard.update_search_fringe(keyspace, cont_lsn, &mut fringe)?;
    4602              :             }
    4603        10192 :             VersionedKeySpaceQuery::Scattered { keyspaces_at_lsn } => {
    4604        21109 :                 for (lsn, keyspace) in keyspaces_at_lsn.iter() {
    4605        21109 :                     let cont_lsn_for_keyspace = Lsn(lsn.0 + 1);
    4606        21109 :                     guard.update_search_fringe(keyspace, cont_lsn_for_keyspace, &mut fringe)?;
    4607              :                 }
    4608              :             }
    4609              :         }
    4610              : 
    4611       425394 :         Ok(fringe)
    4612       425394 :     }
    4613              : 
    4614              :     /// Collect the reconstruct data for a keyspace from the specified timeline.
    4615              :     ///
    4616              :     /// Maintain a fringe [`LayerFringe`] which tracks all the layers that intersect
    4617              :     /// the current keyspace. The current keyspace of the search at any given timeline
    4618              :     /// is the original keyspace minus all the keys that have been completed minus
    4619              :     /// any keys for which we couldn't find an intersecting layer. It's not tracked explicitly,
    4620              :     /// but if you merge all the keyspaces in the fringe, you get the "current keyspace".
    4621              :     ///
    4622              :     /// This is basically a depth-first search visitor implementation where a vertex
    4623              :     /// is the (layer, lsn range, key space) tuple. The fringe acts as the stack.
    4624              :     ///
    4625              :     /// At each iteration pop the top of the fringe (the layer with the highest Lsn)
    4626              :     /// and get all the required reconstruct data from the layer in one go.
    4627              :     ///
    4628              :     /// Returns the completed keyspace and the keyspaces with image coverage. The caller
    4629              :     /// decides how to deal with these two keyspaces.
    4630       425394 :     async fn get_vectored_reconstruct_data_timeline(
    4631       425394 :         timeline: &Timeline,
    4632       425394 :         query: &VersionedKeySpaceQuery,
    4633       425394 :         reconstruct_state: &mut ValuesReconstructState,
    4634       425394 :         cancel: &CancellationToken,
    4635       425394 :         ctx: &RequestContext,
    4636       425394 :     ) -> Result<TimelineVisitOutcome, GetVectoredError> {
    4637              :         // Prevent GC from progressing while visiting the current timeline.
    4638              :         // If we are GC-ing because a new image layer was added while traversing
    4639              :         // the timeline, then it will remove layers that are required for fulfilling
    4640              :         // the current get request (read-path cannot "look back" and notice the new
    4641              :         // image layer).
    4642       425394 :         let _gc_cutoff_holder = timeline.get_applied_gc_cutoff_lsn();
    4643              : 
    4644              :         // See `compaction::compact_with_gc` for why we need this.
    4645       425394 :         let _guard = timeline.gc_compaction_layer_update_lock.read().await;
    4646              : 
    4647              :         // Initialize the fringe
    4648       425394 :         let mut fringe = timeline.get_vectored_init_fringe(query).await?;
    4649              : 
    4650       425394 :         let mut completed_keyspace = KeySpace::default();
    4651       425394 :         let mut image_covered_keyspace = KeySpaceRandomAccum::new();
    4652              : 
    4653       871556 :         while let Some((layer_to_read, keyspace_to_read, lsn_range)) = fringe.next_layer() {
    4654       446162 :             if cancel.is_cancelled() {
    4655            0 :                 return Err(GetVectoredError::Cancelled);
    4656       446162 :             }
    4657              : 
    4658       446162 :             if let Some(ref mut read_path) = reconstruct_state.read_path {
    4659       446162 :                 read_path.record_layer_visit(&layer_to_read, &keyspace_to_read, &lsn_range);
    4660       446162 :             }
    4661              : 
    4662              :             // Visit the layer and plan IOs for it
    4663       446162 :             let next_cont_lsn = lsn_range.start;
    4664       446162 :             layer_to_read
    4665       446162 :                 .get_values_reconstruct_data(
    4666       446162 :                     keyspace_to_read.clone(),
    4667       446162 :                     lsn_range,
    4668       446162 :                     reconstruct_state,
    4669       446162 :                     ctx,
    4670       446162 :                 )
    4671       446162 :                 .await?;
    4672              : 
    4673       446162 :             let mut unmapped_keyspace = keyspace_to_read;
    4674       446162 :             let cont_lsn = next_cont_lsn;
    4675              : 
    4676       446162 :             reconstruct_state.on_layer_visited(&layer_to_read);
    4677              : 
    4678       446162 :             let (keys_done_last_step, keys_with_image_coverage) =
    4679       446162 :                 reconstruct_state.consume_done_keys();
    4680       446162 :             unmapped_keyspace.remove_overlapping_with(&keys_done_last_step);
    4681       446162 :             completed_keyspace.merge(&keys_done_last_step);
    4682       446162 :             if let Some(keys_with_image_coverage) = keys_with_image_coverage {
    4683        14122 :                 unmapped_keyspace
    4684        14122 :                     .remove_overlapping_with(&KeySpace::single(keys_with_image_coverage.clone()));
    4685        14122 :                 image_covered_keyspace.add_range(keys_with_image_coverage);
    4686       432040 :             }
    4687              : 
    4688              :             // Query the layer map for the next layers to read.
    4689              :             //
    4690              :             // Do not descent any further if the last layer we visited
    4691              :             // completed all keys in the keyspace it inspected. This is not
    4692              :             // required for correctness, but avoids visiting extra layers
    4693              :             // which turns out to be a perf bottleneck in some cases.
    4694       446162 :             if !unmapped_keyspace.is_empty() {
    4695       129478 :                 let guard = timeline.layers.read(LayerManagerLockHolder::GetPage).await;
    4696       129478 :                 guard.update_search_fringe(&unmapped_keyspace, cont_lsn, &mut fringe)?;
    4697              : 
    4698              :                 // It's safe to drop the layer map lock after planning the next round of reads.
    4699              :                 // The fringe keeps readable handles for the layers which are safe to read even
    4700              :                 // if layers were compacted or flushed.
    4701              :                 //
    4702              :                 // The more interesting consideration is: "Why is the read algorithm still correct
    4703              :                 // if the layer map changes while it is operating?". Doing a vectored read on a
    4704              :                 // timeline boils down to pushing an imaginary lsn boundary downwards for each range
    4705              :                 // covered by the read. The layer map tells us how to move the lsn downwards for a
    4706              :                 // range at *a particular point in time*. It is fine for the answer to be different
    4707              :                 // at two different time points.
    4708       129478 :                 drop(guard);
    4709       316684 :             }
    4710              :         }
    4711              : 
    4712       425394 :         Ok(TimelineVisitOutcome {
    4713       425394 :             completed_keyspace,
    4714       425394 :             image_covered_keyspace: image_covered_keyspace.consume_keyspace(),
    4715       425394 :         })
    4716       425394 :     }
    4717              : 
    4718       112981 :     async fn get_ready_ancestor_timeline(
    4719       112981 :         &self,
    4720       112981 :         ancestor: &Arc<Timeline>,
    4721       112981 :         ctx: &RequestContext,
    4722       112981 :     ) -> Result<Arc<Timeline>, GetReadyAncestorError> {
    4723              :         // It's possible that the ancestor timeline isn't active yet, or
    4724              :         // is active but hasn't yet caught up to the branch point. Wait
    4725              :         // for it.
    4726              :         //
    4727              :         // This cannot happen while the pageserver is running normally,
    4728              :         // because you cannot create a branch from a point that isn't
    4729              :         // present in the pageserver yet. However, we don't wait for the
    4730              :         // branch point to be uploaded to cloud storage before creating
    4731              :         // a branch. I.e., the branch LSN need not be remote consistent
    4732              :         // for the branching operation to succeed.
    4733              :         //
    4734              :         // Hence, if we try to load a tenant in such a state where
    4735              :         // 1. the existence of the branch was persisted (in IndexPart and/or locally)
    4736              :         // 2. but the ancestor state is behind branch_lsn because it was not yet persisted
    4737              :         // then we will need to wait for the ancestor timeline to
    4738              :         // re-stream WAL up to branch_lsn before we access it.
    4739              :         //
    4740              :         // How can a tenant get in such a state?
    4741              :         // - ungraceful pageserver process exit
    4742              :         // - detach+attach => this is a bug, https://github.com/neondatabase/neon/issues/4219
    4743              :         //
    4744              :         // NB: this could be avoided by requiring
    4745              :         //   branch_lsn >= remote_consistent_lsn
    4746              :         // during branch creation.
    4747       112981 :         match ancestor.wait_to_become_active(ctx).await {
    4748       112980 :             Ok(()) => {}
    4749              :             Err(TimelineState::Stopping) => {
    4750              :                 // If an ancestor is stopping, it means the tenant is stopping: handle this the same as if this timeline was stopping.
    4751            0 :                 return Err(GetReadyAncestorError::Cancelled);
    4752              :             }
    4753            1 :             Err(state) => {
    4754            1 :                 return Err(GetReadyAncestorError::BadState {
    4755            1 :                     timeline_id: ancestor.timeline_id,
    4756            1 :                     state,
    4757            1 :                 });
    4758              :             }
    4759              :         }
    4760       112980 :         ancestor
    4761       112980 :             .wait_lsn(
    4762       112980 :                 self.ancestor_lsn,
    4763       112980 :                 WaitLsnWaiter::Timeline(self),
    4764       112980 :                 WaitLsnTimeout::Default,
    4765       112980 :                 ctx,
    4766       112980 :             )
    4767       112980 :             .await
    4768       112980 :             .map_err(|e| match e {
    4769            0 :                 e @ WaitLsnError::Timeout(_) => GetReadyAncestorError::AncestorLsnTimeout(e),
    4770            0 :                 WaitLsnError::Shutdown => GetReadyAncestorError::Cancelled,
    4771            0 :                 WaitLsnError::BadState(state) => GetReadyAncestorError::BadState {
    4772            0 :                     timeline_id: ancestor.timeline_id,
    4773            0 :                     state,
    4774            0 :                 },
    4775            0 :             })?;
    4776              : 
    4777       112980 :         Ok(ancestor.clone())
    4778       112981 :     }
    4779              : 
    4780       148592 :     pub(crate) fn get_shard_identity(&self) -> &ShardIdentity {
    4781       148592 :         &self.shard_identity
    4782       148592 :     }
    4783              : 
    4784              :     #[inline(always)]
    4785            0 :     pub(crate) fn shard_timeline_id(&self) -> ShardTimelineId {
    4786            0 :         ShardTimelineId {
    4787            0 :             shard_index: ShardIndex {
    4788            0 :                 shard_number: self.shard_identity.number,
    4789            0 :                 shard_count: self.shard_identity.count,
    4790            0 :             },
    4791            0 :             timeline_id: self.timeline_id,
    4792            0 :         }
    4793            0 :     }
    4794              : 
    4795              :     /// Returns a non-frozen open in-memory layer for ingestion.
    4796              :     ///
    4797              :     /// Takes a witness of timeline writer state lock being held, because it makes no sense to call
    4798              :     /// this function without holding the mutex.
    4799          660 :     async fn get_layer_for_write(
    4800          660 :         &self,
    4801          660 :         lsn: Lsn,
    4802          660 :         _guard: &tokio::sync::MutexGuard<'_, Option<TimelineWriterState>>,
    4803          660 :         ctx: &RequestContext,
    4804          660 :     ) -> anyhow::Result<Arc<InMemoryLayer>> {
    4805          660 :         let mut guard = self
    4806          660 :             .layers
    4807          660 :             .write(LayerManagerLockHolder::GetLayerForWrite)
    4808          660 :             .await;
    4809              : 
    4810          660 :         let last_record_lsn = self.get_last_record_lsn();
    4811          660 :         ensure!(
    4812          660 :             lsn > last_record_lsn,
    4813            0 :             "cannot modify relation after advancing last_record_lsn (incoming_lsn={}, last_record_lsn={})",
    4814              :             lsn,
    4815              :             last_record_lsn,
    4816              :         );
    4817              : 
    4818          660 :         let layer = guard
    4819          660 :             .open_mut()?
    4820          660 :             .get_layer_for_write(
    4821          660 :                 lsn,
    4822          660 :                 self.conf,
    4823          660 :                 self.timeline_id,
    4824          660 :                 self.tenant_shard_id,
    4825          660 :                 &self.gate,
    4826          660 :                 &self.cancel,
    4827          660 :                 ctx,
    4828              :             )
    4829          660 :             .await?;
    4830          660 :         Ok(layer)
    4831          660 :     }
    4832              : 
    4833      2639560 :     pub(crate) fn finish_write(&self, new_lsn: Lsn) {
    4834      2639560 :         assert!(new_lsn.is_aligned());
    4835              : 
    4836      2639560 :         self.metrics.last_record_lsn_gauge.set(new_lsn.0 as i64);
    4837      2639560 :         self.last_record_lsn.advance(new_lsn);
    4838      2639560 :     }
    4839              : 
    4840              :     /// Freeze any existing open in-memory layer and unconditionally notify the flush loop.
    4841              :     ///
    4842              :     /// Unconditional flush loop notification is given because in sharded cases we will want to
    4843              :     /// leave an Lsn gap. Unsharded tenants do not have Lsn gaps.
    4844          611 :     async fn freeze_inmem_layer_at(
    4845          611 :         &self,
    4846          611 :         at: Lsn,
    4847          611 :         write_lock: &mut tokio::sync::MutexGuard<'_, Option<TimelineWriterState>>,
    4848          611 :     ) -> Result<u64, FlushLayerError> {
    4849          611 :         let frozen = {
    4850          611 :             let mut guard = self
    4851          611 :                 .layers
    4852          611 :                 .write(LayerManagerLockHolder::TryFreezeLayer)
    4853          611 :                 .await;
    4854          611 :             guard
    4855          611 :                 .open_mut()?
    4856          611 :                 .try_freeze_in_memory_layer(at, &self.last_freeze_at, write_lock, &self.metrics)
    4857          611 :                 .await
    4858              :         };
    4859              : 
    4860          611 :         if frozen {
    4861          597 :             let now = Instant::now();
    4862          597 :             *(self.last_freeze_ts.write().unwrap()) = now;
    4863          597 :         }
    4864              : 
    4865              :         // Increment the flush cycle counter and wake up the flush task.
    4866              :         // Remember the new value, so that when we listen for the flush
    4867              :         // to finish, we know when the flush that we initiated has
    4868              :         // finished, instead of some other flush that was started earlier.
    4869          611 :         let mut my_flush_request = 0;
    4870              : 
    4871          611 :         let flush_loop_state = { *self.flush_loop_state.lock().unwrap() };
    4872          611 :         if !matches!(flush_loop_state, FlushLoopState::Running { .. }) {
    4873            0 :             return Err(FlushLayerError::NotRunning(flush_loop_state));
    4874          611 :         }
    4875              : 
    4876          611 :         self.layer_flush_start_tx.send_modify(|(counter, lsn)| {
    4877          611 :             my_flush_request = *counter + 1;
    4878          611 :             *counter = my_flush_request;
    4879          611 :             *lsn = std::cmp::max(at, *lsn);
    4880          611 :         });
    4881              : 
    4882          611 :         assert_ne!(my_flush_request, 0);
    4883              : 
    4884          611 :         Ok(my_flush_request)
    4885          611 :     }
    4886              : 
    4887              :     /// Layer flusher task's main loop.
    4888          232 :     async fn flush_loop(
    4889          232 :         self: &Arc<Self>,
    4890          232 :         mut layer_flush_start_rx: tokio::sync::watch::Receiver<(u64, Lsn)>,
    4891          232 :         ctx: &RequestContext,
    4892          232 :     ) {
    4893              :         // Always notify waiters about the flush loop exiting since the loop might stop
    4894              :         // when the timeline hasn't been cancelled.
    4895          232 :         let scopeguard_rx = layer_flush_start_rx.clone();
    4896          232 :         scopeguard::defer! {
    4897              :             let (flush_counter, _) = *scopeguard_rx.borrow();
    4898              :             let _ = self
    4899              :                 .layer_flush_done_tx
    4900              :                 .send_replace((flush_counter, Err(FlushLayerError::Cancelled)));
    4901              :         }
    4902              : 
    4903              :         // Subscribe to L0 delta layer updates, for compaction backpressure.
    4904          232 :         let mut watch_l0 = match self
    4905          232 :             .layers
    4906          232 :             .read(LayerManagerLockHolder::FlushLoop)
    4907          232 :             .await
    4908          232 :             .layer_map()
    4909              :         {
    4910          232 :             Ok(lm) => lm.watch_level0_deltas(),
    4911            0 :             Err(Shutdown) => return,
    4912              :         };
    4913              : 
    4914          232 :         info!("started flush loop");
    4915              :         loop {
    4916          830 :             tokio::select! {
    4917          830 :                 _ = self.cancel.cancelled() => {
    4918            5 :                     info!("shutting down layer flush task due to Timeline::cancel");
    4919            5 :                     break;
    4920              :                 },
    4921          830 :                 _ = layer_flush_start_rx.changed() => {}
    4922              :             }
    4923          598 :             trace!("waking up");
    4924          598 :             let (flush_counter, frozen_to_lsn) = *layer_flush_start_rx.borrow();
    4925              : 
    4926              :             // The highest LSN to which we flushed in the loop over frozen layers
    4927          598 :             let mut flushed_to_lsn = Lsn(0);
    4928              : 
    4929          598 :             let result = loop {
    4930         1194 :                 if self.cancel.is_cancelled() {
    4931            0 :                     info!("dropping out of flush loop for timeline shutdown");
    4932            0 :                     return;
    4933         1194 :                 }
    4934              : 
    4935              :                 // Break to notify potential waiters as soon as we've flushed the requested LSN. If
    4936              :                 // more requests have arrived in the meanwhile, we'll resume flushing afterwards.
    4937         1194 :                 if flushed_to_lsn >= frozen_to_lsn {
    4938          583 :                     break Ok(());
    4939          611 :                 }
    4940              : 
    4941              :                 // Fetch the next layer to flush, if any.
    4942          611 :                 let (layer, l0_count, frozen_count, frozen_size, open_layer_size) = {
    4943          611 :                     let layers = self.layers.read(LayerManagerLockHolder::FlushLoop).await;
    4944          611 :                     let Ok(lm) = layers.layer_map() else {
    4945            0 :                         info!("dropping out of flush loop for timeline shutdown");
    4946            0 :                         return;
    4947              :                     };
    4948          611 :                     let l0_count = lm.level0_deltas().len();
    4949          611 :                     let frozen_count = lm.frozen_layers.len();
    4950          611 :                     let frozen_size: u64 = lm
    4951          611 :                         .frozen_layers
    4952          611 :                         .iter()
    4953          611 :                         .map(|l| l.estimated_in_mem_size())
    4954          611 :                         .sum();
    4955          611 :                     let open_layer_size: u64 = lm
    4956          611 :                         .open_layer
    4957          611 :                         .as_ref()
    4958          611 :                         .map(|l| l.estimated_in_mem_size())
    4959          611 :                         .unwrap_or(0);
    4960          611 :                     let layer = lm.frozen_layers.front().cloned();
    4961          611 :                     (layer, l0_count, frozen_count, frozen_size, open_layer_size)
    4962              :                     // drop 'layers' lock
    4963              :                 };
    4964          611 :                 let Some(layer) = layer else {
    4965           14 :                     break Ok(());
    4966              :                 };
    4967              : 
    4968              :                 // Stall flushes to backpressure if compaction can't keep up. This is propagated up
    4969              :                 // to WAL ingestion by having ephemeral layer rolls wait for flushes.
    4970          597 :                 if let Some(stall_threshold) = self.get_l0_flush_stall_threshold() {
    4971            0 :                     if l0_count >= stall_threshold {
    4972            0 :                         warn!(
    4973            0 :                             "stalling layer flushes for compaction backpressure at {l0_count} \
    4974            0 :                             L0 layers ({frozen_count} frozen layers with {frozen_size} bytes, {open_layer_size} bytes in open layer)"
    4975              :                         );
    4976            0 :                         let stall_timer = self
    4977            0 :                             .metrics
    4978            0 :                             .flush_delay_histo
    4979            0 :                             .start_timer()
    4980            0 :                             .record_on_drop();
    4981            0 :                         tokio::select! {
    4982            0 :                             result = watch_l0.wait_for(|l0| *l0 < stall_threshold) => {
    4983            0 :                                 if let Ok(l0) = result.as_deref() {
    4984            0 :                                     let delay = stall_timer.elapsed().as_secs_f64();
    4985            0 :                                     info!("resuming layer flushes at {l0} L0 layers after {delay:.3}s");
    4986            0 :                                 }
    4987              :                             },
    4988            0 :                             _ = self.cancel.cancelled() => {},
    4989              :                         }
    4990            0 :                         continue; // check again
    4991            0 :                     }
    4992          597 :                 }
    4993              : 
    4994              :                 // Flush the layer.
    4995          597 :                 let flush_timer = self.metrics.flush_time_histo.start_timer();
    4996          597 :                 match self.flush_frozen_layer(layer, ctx).await {
    4997          596 :                     Ok(layer_lsn) => flushed_to_lsn = max(flushed_to_lsn, layer_lsn),
    4998              :                     Err(FlushLayerError::Cancelled) => {
    4999            0 :                         info!("dropping out of flush loop for timeline shutdown");
    5000            0 :                         return;
    5001              :                     }
    5002            1 :                     err @ Err(
    5003              :                         FlushLayerError::NotRunning(_)
    5004              :                         | FlushLayerError::Other(_)
    5005              :                         | FlushLayerError::CreateImageLayersError(_),
    5006              :                     ) => {
    5007            1 :                         error!("could not flush frozen layer: {err:?}");
    5008            1 :                         break err.map(|_| ());
    5009              :                     }
    5010              :                 }
    5011          596 :                 let flush_duration = flush_timer.stop_and_record();
    5012              : 
    5013              :                 // Notify the tenant compaction loop if L0 compaction is needed.
    5014          596 :                 let l0_count = *watch_l0.borrow();
    5015          596 :                 if l0_count >= self.get_compaction_threshold() {
    5016          238 :                     self.l0_compaction_trigger.notify_one();
    5017          358 :                 }
    5018              : 
    5019              :                 // Delay the next flush to backpressure if compaction can't keep up. We delay by the
    5020              :                 // flush duration such that the flush takes 2x as long. This is propagated up to WAL
    5021              :                 // ingestion by having ephemeral layer rolls wait for flushes.
    5022          596 :                 if let Some(delay_threshold) = self.get_l0_flush_delay_threshold() {
    5023            4 :                     if l0_count >= delay_threshold {
    5024            0 :                         let delay = flush_duration.as_secs_f64();
    5025            0 :                         info!(
    5026            0 :                             "delaying layer flush by {delay:.3}s for compaction backpressure at \
    5027            0 :                             {l0_count} L0 layers ({frozen_count} frozen layers with {frozen_size} bytes, {open_layer_size} bytes in open layer)"
    5028              :                         );
    5029            0 :                         let _delay_timer = self
    5030            0 :                             .metrics
    5031            0 :                             .flush_delay_histo
    5032            0 :                             .start_timer()
    5033            0 :                             .record_on_drop();
    5034            0 :                         tokio::select! {
    5035            0 :                             _ = tokio::time::sleep(flush_duration) => {},
    5036            0 :                             _ = watch_l0.wait_for(|l0| *l0 < delay_threshold) => {},
    5037            0 :                             _ = self.cancel.cancelled() => {},
    5038              :                         }
    5039            4 :                     }
    5040          592 :                 }
    5041              :             };
    5042              : 
    5043              :             // Unsharded tenants should never advance their LSN beyond the end of the
    5044              :             // highest layer they write: such gaps between layer data and the frozen LSN
    5045              :             // are only legal on sharded tenants.
    5046          598 :             debug_assert!(
    5047          598 :                 self.shard_identity.count.count() > 1
    5048          591 :                     || flushed_to_lsn >= frozen_to_lsn
    5049           14 :                     || !flushed_to_lsn.is_valid()
    5050              :             );
    5051              : 
    5052          598 :             if flushed_to_lsn < frozen_to_lsn
    5053           15 :                 && self.shard_identity.count.count() > 1
    5054            1 :                 && result.is_ok()
    5055              :             {
    5056              :                 // If our layer flushes didn't carry disk_consistent_lsn up to the `to_lsn` advertised
    5057              :                 // to us via layer_flush_start_rx, then advance it here.
    5058              :                 //
    5059              :                 // This path is only taken for tenants with multiple shards: single sharded tenants should
    5060              :                 // never encounter a gap in the wal.
    5061            0 :                 let old_disk_consistent_lsn = self.disk_consistent_lsn.load();
    5062            0 :                 tracing::debug!(
    5063            0 :                     "Advancing disk_consistent_lsn across layer gap {old_disk_consistent_lsn}->{frozen_to_lsn}"
    5064              :                 );
    5065            0 :                 if self.set_disk_consistent_lsn(frozen_to_lsn) {
    5066            0 :                     if let Err(e) = self.schedule_uploads(frozen_to_lsn, vec![]) {
    5067            0 :                         tracing::warn!(
    5068            0 :                             "Failed to schedule metadata upload after updating disk_consistent_lsn: {e}"
    5069              :                         );
    5070            0 :                     }
    5071            0 :                 }
    5072          598 :             }
    5073              : 
    5074              :             // Notify any listeners that we're done
    5075          598 :             let _ = self
    5076          598 :                 .layer_flush_done_tx
    5077          598 :                 .send_replace((flush_counter, result));
    5078              :         }
    5079            5 :     }
    5080              : 
    5081              :     /// Waits any flush request created by [`Self::freeze_inmem_layer_at`] to complete.
    5082          571 :     async fn wait_flush_completion(&self, request: u64) -> Result<(), FlushLayerError> {
    5083          571 :         let mut rx = self.layer_flush_done_tx.subscribe();
    5084              :         loop {
    5085              :             {
    5086         1161 :                 let (last_result_counter, last_result) = &*rx.borrow();
    5087         1161 :                 if *last_result_counter >= request {
    5088          571 :                     if let Err(err) = last_result {
    5089              :                         // We already logged the original error in
    5090              :                         // flush_loop. We cannot propagate it to the caller
    5091              :                         // here, because it might not be Cloneable
    5092            1 :                         return Err(err.clone());
    5093              :                     } else {
    5094          570 :                         return Ok(());
    5095              :                     }
    5096          590 :                 }
    5097              :             }
    5098          590 :             trace!("waiting for flush to complete");
    5099          590 :             tokio::select! {
    5100          590 :                 rx_e = rx.changed() => {
    5101          590 :                     rx_e.map_err(|_| FlushLayerError::NotRunning(*self.flush_loop_state.lock().unwrap()))?;
    5102              :                 },
    5103              :                 // Cancellation safety: we are not leaving an I/O in-flight for the flush, we're just ignoring
    5104              :                 // the notification from [`flush_loop`] that it completed.
    5105          590 :                 _ = self.cancel.cancelled() => {
    5106            0 :                     tracing::info!("Cancelled layer flush due on timeline shutdown");
    5107            0 :                     return Ok(())
    5108              :                 }
    5109              :             };
    5110          590 :             trace!("done")
    5111              :         }
    5112          571 :     }
    5113              : 
    5114              :     /// Flush one frozen in-memory layer to disk, as a new delta layer.
    5115              :     ///
    5116              :     /// Return value is the last lsn (inclusive) of the layer that was frozen.
    5117              :     #[instrument(skip_all, fields(layer=%frozen_layer))]
    5118              :     async fn flush_frozen_layer(
    5119              :         self: &Arc<Self>,
    5120              :         frozen_layer: Arc<InMemoryLayer>,
    5121              :         ctx: &RequestContext,
    5122              :     ) -> Result<Lsn, FlushLayerError> {
    5123              :         debug_assert_current_span_has_tenant_and_timeline_id();
    5124              : 
    5125              :         // As a special case, when we have just imported an image into the repository,
    5126              :         // instead of writing out a L0 delta layer, we directly write out image layer
    5127              :         // files instead. This is possible as long as *all* the data imported into the
    5128              :         // repository have the same LSN.
    5129              :         let lsn_range = frozen_layer.get_lsn_range();
    5130              : 
    5131              :         // Whether to directly create image layers for this flush, or flush them as delta layers
    5132              :         let create_image_layer =
    5133              :             lsn_range.start == self.initdb_lsn && lsn_range.end == Lsn(self.initdb_lsn.0 + 1);
    5134              : 
    5135              :         #[cfg(test)]
    5136              :         {
    5137              :             match &mut *self.flush_loop_state.lock().unwrap() {
    5138              :                 FlushLoopState::NotStarted | FlushLoopState::Exited => {
    5139              :                     panic!("flush loop not running")
    5140              :                 }
    5141              :                 FlushLoopState::Running {
    5142              :                     expect_initdb_optimization,
    5143              :                     initdb_optimization_count,
    5144              :                     ..
    5145              :                 } => {
    5146              :                     if create_image_layer {
    5147              :                         *initdb_optimization_count += 1;
    5148              :                     } else {
    5149              :                         assert!(!*expect_initdb_optimization, "expected initdb optimization");
    5150              :                     }
    5151              :                 }
    5152              :             }
    5153              :         }
    5154              : 
    5155              :         let (layers_to_upload, delta_layer_to_add) = if create_image_layer {
    5156              :             // Note: The 'ctx' in use here has DownloadBehavior::Error. We should not
    5157              :             // require downloading anything during initial import.
    5158              :             let ((rel_partition, metadata_partition), _lsn) = self
    5159              :                 .repartition(
    5160              :                     self.initdb_lsn,
    5161              :                     self.get_compaction_target_size(),
    5162              :                     EnumSet::empty(),
    5163              :                     ctx,
    5164              :                 )
    5165              :                 .await
    5166            0 :                 .map_err(|e| FlushLayerError::from_anyhow(self, e.into_anyhow()))?;
    5167              : 
    5168              :             if self.cancel.is_cancelled() {
    5169              :                 return Err(FlushLayerError::Cancelled);
    5170              :             }
    5171              : 
    5172              :             // Ensure that we have a single call to `create_image_layers` with a combined dense keyspace.
    5173              :             // So that the key ranges don't overlap.
    5174              :             let mut partitions = KeyPartitioning::default();
    5175              :             partitions.parts.extend(rel_partition.parts);
    5176              :             if !metadata_partition.parts.is_empty() {
    5177              :                 assert_eq!(
    5178              :                     metadata_partition.parts.len(),
    5179              :                     1,
    5180              :                     "currently sparse keyspace should only contain a single metadata keyspace"
    5181              :                 );
    5182              :                 // Safety: create_image_layers treat sparse keyspaces differently that it does not scan
    5183              :                 // every single key within the keyspace, and therefore, it's safe to force converting it
    5184              :                 // into a dense keyspace before calling this function.
    5185              :                 partitions
    5186              :                     .parts
    5187              :                     .extend(metadata_partition.into_dense().parts);
    5188              :             }
    5189              : 
    5190              :             let mut layers_to_upload = Vec::new();
    5191              :             let (generated_image_layers, is_complete) = self
    5192              :                 .create_image_layers(
    5193              :                     &partitions,
    5194              :                     self.initdb_lsn,
    5195              :                     None,
    5196              :                     ImageLayerCreationMode::Initial,
    5197              :                     ctx,
    5198              :                     LastImageLayerCreationStatus::Initial,
    5199              :                     false, // don't yield for L0, we're flushing L0
    5200              :                 )
    5201              :                 .instrument(info_span!("create_image_layers", mode = %ImageLayerCreationMode::Initial, partition_mode = "initial", lsn = %self.initdb_lsn))
    5202              :                 .await?;
    5203              :             debug_assert!(
    5204              :                 matches!(is_complete, LastImageLayerCreationStatus::Complete),
    5205              :                 "init image generation mode must fully cover the keyspace"
    5206              :             );
    5207              :             layers_to_upload.extend(generated_image_layers);
    5208              : 
    5209              :             (layers_to_upload, None)
    5210              :         } else {
    5211              :             // Normal case, write out a L0 delta layer file.
    5212              :             // `create_delta_layer` will not modify the layer map.
    5213              :             // We will remove frozen layer and add delta layer in one atomic operation later.
    5214              :             let Some(layer) = self
    5215              :                 .create_delta_layer(&frozen_layer, None, ctx)
    5216              :                 .await
    5217            0 :                 .map_err(|e| FlushLayerError::from_anyhow(self, e))?
    5218              :             else {
    5219              :                 panic!("delta layer cannot be empty if no filter is applied");
    5220              :             };
    5221              :             (
    5222              :                 // FIXME: even though we have a single image and single delta layer assumption
    5223              :                 // we push them to vec
    5224              :                 vec![layer.clone()],
    5225              :                 Some(layer),
    5226              :             )
    5227              :         };
    5228              : 
    5229              :         pausable_failpoint!("flush-layer-cancel-after-writing-layer-out-pausable");
    5230              : 
    5231              :         if self.cancel.is_cancelled() {
    5232              :             return Err(FlushLayerError::Cancelled);
    5233              :         }
    5234              : 
    5235            1 :         fail_point!("flush-layer-before-update-remote-consistent-lsn", |_| {
    5236            1 :             Err(FlushLayerError::Other(anyhow!("failpoint").into()))
    5237            1 :         });
    5238              : 
    5239              :         let disk_consistent_lsn = Lsn(lsn_range.end.0 - 1);
    5240              : 
    5241              :         // The new on-disk layers are now in the layer map. We can remove the
    5242              :         // in-memory layer from the map now. The flushed layer is stored in
    5243              :         // the mapping in `create_delta_layer`.
    5244              :         {
    5245              :             let mut guard = self
    5246              :                 .layers
    5247              :                 .write(LayerManagerLockHolder::FlushFrozenLayer)
    5248              :                 .await;
    5249              : 
    5250              :             guard.open_mut()?.finish_flush_l0_layer(
    5251              :                 delta_layer_to_add.as_ref(),
    5252              :                 &frozen_layer,
    5253              :                 &self.metrics,
    5254              :             );
    5255              : 
    5256              :             if self.set_disk_consistent_lsn(disk_consistent_lsn) {
    5257              :                 // Schedule remote uploads that will reflect our new disk_consistent_lsn
    5258              :                 self.schedule_uploads(disk_consistent_lsn, layers_to_upload)
    5259            0 :                     .map_err(|e| FlushLayerError::from_anyhow(self, e))?;
    5260              :             }
    5261              :             // release lock on 'layers'
    5262              :         };
    5263              : 
    5264              :         // FIXME: between create_delta_layer and the scheduling of the upload in `update_metadata_file`,
    5265              :         // a compaction can delete the file and then it won't be available for uploads any more.
    5266              :         // We still schedule the upload, resulting in an error, but ideally we'd somehow avoid this
    5267              :         // race situation.
    5268              :         // See https://github.com/neondatabase/neon/issues/4526
    5269              :         pausable_failpoint!("flush-frozen-pausable");
    5270              : 
    5271              :         // This failpoint is used by another test case `test_pageserver_recovery`.
    5272              :         fail_point!("flush-frozen-exit");
    5273              : 
    5274              :         Ok(Lsn(lsn_range.end.0 - 1))
    5275              :     }
    5276              : 
    5277              :     /// Return true if the value changed
    5278              :     ///
    5279              :     /// This function must only be used from the layer flush task.
    5280          596 :     fn set_disk_consistent_lsn(&self, new_value: Lsn) -> bool {
    5281          596 :         let old_value = self.disk_consistent_lsn.fetch_max(new_value);
    5282          596 :         assert!(
    5283          596 :             new_value >= old_value,
    5284            0 :             "disk_consistent_lsn must be growing monotonously at runtime; current {old_value}, offered {new_value}"
    5285              :         );
    5286              : 
    5287          596 :         self.metrics
    5288          596 :             .disk_consistent_lsn_gauge
    5289          596 :             .set(new_value.0 as i64);
    5290          596 :         new_value != old_value
    5291          596 :     }
    5292              : 
    5293              :     /// Update metadata file
    5294          623 :     fn schedule_uploads(
    5295          623 :         &self,
    5296          623 :         disk_consistent_lsn: Lsn,
    5297          623 :         layers_to_upload: impl IntoIterator<Item = ResidentLayer>,
    5298          623 :     ) -> anyhow::Result<()> {
    5299              :         // We can only save a valid 'prev_record_lsn' value on disk if we
    5300              :         // flushed *all* in-memory changes to disk. We only track
    5301              :         // 'prev_record_lsn' in memory for the latest processed record, so we
    5302              :         // don't remember what the correct value that corresponds to some old
    5303              :         // LSN is. But if we flush everything, then the value corresponding
    5304              :         // current 'last_record_lsn' is correct and we can store it on disk.
    5305              :         let RecordLsn {
    5306          623 :             last: last_record_lsn,
    5307          623 :             prev: prev_record_lsn,
    5308          623 :         } = self.last_record_lsn.load();
    5309          623 :         let ondisk_prev_record_lsn = if disk_consistent_lsn == last_record_lsn {
    5310          560 :             Some(prev_record_lsn)
    5311              :         } else {
    5312           63 :             None
    5313              :         };
    5314              : 
    5315          623 :         let update = crate::tenant::metadata::MetadataUpdate::new(
    5316          623 :             disk_consistent_lsn,
    5317          623 :             ondisk_prev_record_lsn,
    5318          623 :             *self.applied_gc_cutoff_lsn.read(),
    5319              :         );
    5320              : 
    5321          623 :         fail_point!("checkpoint-before-saving-metadata", |x| bail!(
    5322            0 :             "{}",
    5323            0 :             x.unwrap()
    5324              :         ));
    5325              : 
    5326         1225 :         for layer in layers_to_upload {
    5327          602 :             self.remote_client.schedule_layer_file_upload(layer)?;
    5328              :         }
    5329          623 :         self.remote_client
    5330          623 :             .schedule_index_upload_for_metadata_update(&update)?;
    5331              : 
    5332          623 :         Ok(())
    5333          623 :     }
    5334              : 
    5335            0 :     pub(crate) async fn preserve_initdb_archive(&self) -> anyhow::Result<()> {
    5336            0 :         self.remote_client
    5337            0 :             .preserve_initdb_archive(
    5338            0 :                 &self.tenant_shard_id.tenant_id,
    5339            0 :                 &self.timeline_id,
    5340            0 :                 &self.cancel,
    5341            0 :             )
    5342            0 :             .await
    5343            0 :     }
    5344              : 
    5345              :     // Write out the given frozen in-memory layer as a new L0 delta file. This L0 file will not be tracked
    5346              :     // in layer map immediately. The caller is responsible to put it into the layer map.
    5347          486 :     async fn create_delta_layer(
    5348          486 :         self: &Arc<Self>,
    5349          486 :         frozen_layer: &Arc<InMemoryLayer>,
    5350          486 :         key_range: Option<Range<Key>>,
    5351          486 :         ctx: &RequestContext,
    5352          486 :     ) -> anyhow::Result<Option<ResidentLayer>> {
    5353          486 :         let self_clone = Arc::clone(self);
    5354          486 :         let frozen_layer = Arc::clone(frozen_layer);
    5355          486 :         let ctx = ctx.attached_child();
    5356          486 :         let work = async move {
    5357          486 :             let Some((desc, path)) = frozen_layer
    5358          486 :                 .write_to_disk(
    5359          486 :                     &ctx,
    5360          486 :                     key_range,
    5361          486 :                     self_clone.l0_flush_global_state.inner(),
    5362          486 :                     &self_clone.gate,
    5363          486 :                     self_clone.cancel.clone(),
    5364          486 :                 )
    5365          486 :                 .await?
    5366              :             else {
    5367            0 :                 return Ok(None);
    5368              :             };
    5369          486 :             let new_delta = Layer::finish_creating(self_clone.conf, &self_clone, desc, &path)?;
    5370              : 
    5371              :             // The write_to_disk() above calls writer.finish() which already did the fsync of the inodes.
    5372              :             // We just need to fsync the directory in which these inodes are linked,
    5373              :             // which we know to be the timeline directory.
    5374              :             //
    5375              :             // We use fatal_err() below because the after write_to_disk returns with success,
    5376              :             // the in-memory state of the filesystem already has the layer file in its final place,
    5377              :             // and subsequent pageserver code could think it's durable while it really isn't.
    5378          486 :             let timeline_dir = VirtualFile::open(
    5379          486 :                 &self_clone
    5380          486 :                     .conf
    5381          486 :                     .timeline_path(&self_clone.tenant_shard_id, &self_clone.timeline_id),
    5382          486 :                 &ctx,
    5383          486 :             )
    5384          486 :             .await
    5385          486 :             .fatal_err("VirtualFile::open for timeline dir fsync");
    5386          486 :             timeline_dir
    5387          486 :                 .sync_all()
    5388          486 :                 .await
    5389          486 :                 .fatal_err("VirtualFile::sync_all timeline dir");
    5390          486 :             anyhow::Ok(Some(new_delta))
    5391          486 :         };
    5392              :         // Before tokio-epoll-uring, we ran write_to_disk & the sync_all inside spawn_blocking.
    5393              :         // Preserve that behavior to maintain the same behavior for `virtual_file_io_engine=std-fs`.
    5394              :         use crate::virtual_file::io_engine::IoEngine;
    5395          486 :         match crate::virtual_file::io_engine::get() {
    5396            0 :             IoEngine::NotSet => panic!("io engine not set"),
    5397              :             IoEngine::StdFs => {
    5398            0 :                 let span = tracing::info_span!("blocking");
    5399            0 :                 tokio::task::spawn_blocking({
    5400            0 :                     move || Handle::current().block_on(work.instrument(span))
    5401              :                 })
    5402            0 :                 .await
    5403            0 :                 .context("spawn_blocking")
    5404            0 :                 .and_then(|x| x)
    5405              :             }
    5406              :             #[cfg(target_os = "linux")]
    5407          486 :             IoEngine::TokioEpollUring => work.await,
    5408              :         }
    5409          486 :     }
    5410              : 
    5411          303 :     async fn repartition(
    5412          303 :         &self,
    5413          303 :         lsn: Lsn,
    5414          303 :         partition_size: u64,
    5415          303 :         flags: EnumSet<CompactFlags>,
    5416          303 :         ctx: &RequestContext,
    5417          303 :     ) -> Result<((KeyPartitioning, SparseKeyPartitioning), Lsn), RepartitionError> {
    5418          303 :         let Ok(mut guard) = self.partitioning.try_write_guard() else {
    5419              :             // NB: there are two callers, one is the compaction task, of which there is only one per struct Tenant and hence Timeline.
    5420              :             // The other is the initdb optimization in flush_frozen_layer, used by `boostrap_timeline`, which runs before `.activate()`
    5421              :             // and hence before the compaction task starts.
    5422            0 :             return Err(RepartitionError::Other(anyhow!(
    5423            0 :                 "repartition() called concurrently"
    5424            0 :             )));
    5425              :         };
    5426          303 :         let ((dense_partition, sparse_partition), partition_lsn) = &*guard.read();
    5427          303 :         if lsn < *partition_lsn {
    5428            0 :             return Err(RepartitionError::Other(anyhow!(
    5429            0 :                 "repartition() called with LSN going backwards, this should not happen"
    5430            0 :             )));
    5431          303 :         }
    5432              : 
    5433          303 :         let distance = lsn.0 - partition_lsn.0;
    5434          303 :         if *partition_lsn != Lsn(0)
    5435          141 :             && distance <= self.repartition_threshold
    5436          141 :             && !flags.contains(CompactFlags::ForceRepartition)
    5437              :         {
    5438          134 :             debug!(
    5439              :                 distance,
    5440              :                 threshold = self.repartition_threshold,
    5441            0 :                 "no repartitioning needed"
    5442              :             );
    5443          134 :             return Ok((
    5444          134 :                 (dense_partition.clone(), sparse_partition.clone()),
    5445          134 :                 *partition_lsn,
    5446          134 :             ));
    5447          169 :         }
    5448              : 
    5449          169 :         let (dense_ks, sparse_ks) = self
    5450          169 :             .collect_keyspace(lsn, ctx)
    5451          169 :             .await
    5452          169 :             .map_err(RepartitionError::CollectKeyspace)?;
    5453          169 :         let dense_partitioning = dense_ks.partition(
    5454          169 :             &self.shard_identity,
    5455          169 :             partition_size,
    5456          169 :             postgres_ffi::BLCKSZ as u64,
    5457              :         );
    5458          169 :         let sparse_partitioning = SparseKeyPartitioning {
    5459          169 :             parts: vec![sparse_ks],
    5460          169 :         }; // no partitioning for metadata keys for now
    5461          169 :         let result = ((dense_partitioning, sparse_partitioning), lsn);
    5462          169 :         guard.write(result.clone());
    5463          169 :         Ok(result)
    5464          303 :     }
    5465              : 
    5466              :     // Is it time to create a new image layer for the given partition? True if we want to generate.
    5467           57 :     async fn time_for_new_image_layer(
    5468           57 :         &self,
    5469           57 :         partition: &KeySpace,
    5470           57 :         lsn: Lsn,
    5471           57 :         force_image_creation_lsn: Option<Lsn>,
    5472           57 :     ) -> bool {
    5473           57 :         let threshold = self.get_image_creation_threshold();
    5474              : 
    5475           57 :         let guard = self.layers.read(LayerManagerLockHolder::Compaction).await;
    5476           57 :         let Ok(layers) = guard.layer_map() else {
    5477            0 :             return false;
    5478              :         };
    5479           57 :         let mut min_image_lsn: Lsn = Lsn::MAX;
    5480           57 :         let mut max_deltas = 0;
    5481          364 :         for part_range in &partition.ranges {
    5482          307 :             let image_coverage = layers.image_coverage(part_range, lsn);
    5483          614 :             for (img_range, last_img) in image_coverage {
    5484          307 :                 let img_lsn = if let Some(last_img) = last_img {
    5485            0 :                     last_img.get_lsn_range().end
    5486              :                 } else {
    5487          307 :                     Lsn(0)
    5488              :                 };
    5489              :                 // Let's consider an example:
    5490              :                 //
    5491              :                 // delta layer with LSN range 71-81
    5492              :                 // delta layer with LSN range 81-91
    5493              :                 // delta layer with LSN range 91-101
    5494              :                 // image layer at LSN 100
    5495              :                 //
    5496              :                 // If 'lsn' is still 100, i.e. no new WAL has been processed since the last image layer,
    5497              :                 // there's no need to create a new one. We check this case explicitly, to avoid passing
    5498              :                 // a bogus range to count_deltas below, with start > end. It's even possible that there
    5499              :                 // are some delta layers *later* than current 'lsn', if more WAL was processed and flushed
    5500              :                 // after we read last_record_lsn, which is passed here in the 'lsn' argument.
    5501          307 :                 if img_lsn < lsn {
    5502          307 :                     let num_deltas =
    5503          307 :                         layers.count_deltas(&img_range, &(img_lsn..lsn), Some(threshold));
    5504              : 
    5505          307 :                     max_deltas = max_deltas.max(num_deltas);
    5506          307 :                     if num_deltas >= threshold {
    5507            0 :                         debug!(
    5508            0 :                             "key range {}-{}, has {} deltas on this timeline in LSN range {}..{}",
    5509              :                             img_range.start, img_range.end, num_deltas, img_lsn, lsn
    5510              :                         );
    5511            0 :                         return true;
    5512          307 :                     }
    5513            0 :                 }
    5514          307 :                 min_image_lsn = min(min_image_lsn, img_lsn);
    5515              :             }
    5516              :         }
    5517              : 
    5518              :         // HADRON
    5519              :         // for child timelines, we consider all pages up to ancestor_LSN are redone successfully by the parent timeline
    5520           57 :         min_image_lsn = min_image_lsn.max(self.get_ancestor_lsn());
    5521           57 :         if min_image_lsn < force_image_creation_lsn.unwrap_or(Lsn(0)) && max_deltas > 0 {
    5522            0 :             info!(
    5523            0 :                 "forcing image creation for partitioned range {}-{}. Min image LSN: {}, force image creation LSN: {}, num deltas: {}",
    5524            0 :                 partition.ranges[0].start,
    5525            0 :                 partition.ranges[0].end,
    5526              :                 min_image_lsn,
    5527            0 :                 force_image_creation_lsn.unwrap(),
    5528              :                 max_deltas
    5529              :             );
    5530            0 :             return true;
    5531           57 :         }
    5532              : 
    5533           57 :         debug!(
    5534              :             max_deltas,
    5535            0 :             "none of the partitioned ranges had >= {threshold} deltas"
    5536              :         );
    5537           57 :         false
    5538           57 :     }
    5539              : 
    5540              :     /// Create image layers for Postgres data. Assumes the caller passes a partition that is not too large,
    5541              :     /// so that at most one image layer will be produced from this function.
    5542              :     #[allow(clippy::too_many_arguments)]
    5543          124 :     async fn create_image_layer_for_rel_blocks(
    5544          124 :         self: &Arc<Self>,
    5545          124 :         partition: &KeySpace,
    5546          124 :         mut image_layer_writer: ImageLayerWriter,
    5547          124 :         lsn: Lsn,
    5548          124 :         ctx: &RequestContext,
    5549          124 :         img_range: Range<Key>,
    5550          124 :         io_concurrency: IoConcurrency,
    5551          124 :         progress: Option<(usize, usize)>,
    5552          124 :     ) -> Result<ImageLayerCreationOutcome, CreateImageLayersError> {
    5553          124 :         let mut wrote_keys = false;
    5554              : 
    5555          124 :         let mut key_request_accum = KeySpaceAccum::new();
    5556          824 :         for range in &partition.ranges {
    5557          700 :             let mut key = range.start;
    5558         1517 :             while key < range.end {
    5559              :                 // Decide whether to retain this key: usually we do, but sharded tenants may
    5560              :                 // need to drop keys that don't belong to them.  If we retain the key, add it
    5561              :                 // to `key_request_accum` for later issuing a vectored get
    5562          817 :                 if self.shard_identity.is_key_disposable(&key) {
    5563            0 :                     debug!(
    5564            0 :                         "Dropping key {} during compaction (it belongs on shard {:?})",
    5565              :                         key,
    5566            0 :                         self.shard_identity.get_shard_number(&key)
    5567              :                     );
    5568          817 :                 } else {
    5569          817 :                     key_request_accum.add_key(key);
    5570          817 :                 }
    5571              : 
    5572          817 :                 let last_key_in_range = key.next() == range.end;
    5573          817 :                 key = key.next();
    5574              : 
    5575              :                 // Maybe flush `key_rest_accum`
    5576          817 :                 if key_request_accum.raw_size() >= self.conf.max_get_vectored_keys.get() as u64
    5577          817 :                     || (last_key_in_range && key_request_accum.raw_size() > 0)
    5578              :                 {
    5579          700 :                     let query =
    5580          700 :                         VersionedKeySpaceQuery::uniform(key_request_accum.consume_keyspace(), lsn);
    5581              : 
    5582          700 :                     let results = self
    5583          700 :                         .get_vectored(query, io_concurrency.clone(), ctx)
    5584          700 :                         .await?;
    5585              : 
    5586          700 :                     if self.cancel.is_cancelled() {
    5587            0 :                         return Err(CreateImageLayersError::Cancelled);
    5588          700 :                     }
    5589              : 
    5590         1517 :                     for (img_key, img) in results {
    5591          817 :                         let img = match img {
    5592          817 :                             Ok(img) => img,
    5593            0 :                             Err(err) => {
    5594              :                                 // If we fail to reconstruct a VM or FSM page, we can zero the
    5595              :                                 // page without losing any actual user data. That seems better
    5596              :                                 // than failing repeatedly and getting stuck.
    5597              :                                 //
    5598              :                                 // We had a bug at one point, where we truncated the FSM and VM
    5599              :                                 // in the pageserver, but the Postgres didn't know about that
    5600              :                                 // and continued to generate incremental WAL records for pages
    5601              :                                 // that didn't exist in the pageserver. Trying to replay those
    5602              :                                 // WAL records failed to find the previous image of the page.
    5603              :                                 // This special case allows us to recover from that situation.
    5604              :                                 // See https://github.com/neondatabase/neon/issues/2601.
    5605              :                                 //
    5606              :                                 // Unfortunately we cannot do this for the main fork, or for
    5607              :                                 // any metadata keys, keys, as that would lead to actual data
    5608              :                                 // loss.
    5609            0 :                                 if img_key.is_rel_fsm_block_key() || img_key.is_rel_vm_block_key() {
    5610            0 :                                     warn!(
    5611            0 :                                         "could not reconstruct FSM or VM key {img_key}, filling with zeros: {err:?}"
    5612              :                                     );
    5613            0 :                                     ZERO_PAGE.clone()
    5614              :                                 } else {
    5615            0 :                                     return Err(CreateImageLayersError::from(err));
    5616              :                                 }
    5617              :                             }
    5618              :                         };
    5619              : 
    5620              :                         // Write all the keys we just read into our new image layer.
    5621          817 :                         image_layer_writer.put_image(img_key, img, ctx).await?;
    5622          817 :                         wrote_keys = true;
    5623              :                     }
    5624          117 :                 }
    5625              :             }
    5626              :         }
    5627              : 
    5628          124 :         let progress_report = progress
    5629          124 :             .map(|(idx, total)| format!("({idx}/{total}) "))
    5630          124 :             .unwrap_or_default();
    5631          124 :         if wrote_keys {
    5632              :             // Normal path: we have written some data into the new image layer for this
    5633              :             // partition, so flush it to disk.
    5634          124 :             info!(
    5635            0 :                 "{} produced image layer for rel {}",
    5636              :                 progress_report,
    5637            0 :                 ImageLayerName {
    5638            0 :                     key_range: img_range.clone(),
    5639            0 :                     lsn
    5640            0 :                 },
    5641              :             );
    5642          124 :             Ok(ImageLayerCreationOutcome::Generated {
    5643          124 :                 unfinished_image_layer: image_layer_writer,
    5644          124 :             })
    5645              :         } else {
    5646            0 :             tracing::debug!(
    5647            0 :                 "{} no data in range {}-{}",
    5648              :                 progress_report,
    5649              :                 img_range.start,
    5650              :                 img_range.end
    5651              :             );
    5652            0 :             Ok(ImageLayerCreationOutcome::Empty)
    5653              :         }
    5654          124 :     }
    5655              : 
    5656              :     /// Create an image layer for metadata keys. This function produces one image layer for all metadata
    5657              :     /// keys for now. Because metadata keys cannot exceed basebackup size limit, the image layer for it
    5658              :     /// would not be too large to fit in a single image layer.
    5659              :     ///
    5660              :     /// Creating image layers for metadata keys are different from relational keys. Firstly, instead of
    5661              :     /// iterating each key and get an image for each of them, we do a `vectored_get` scan over the sparse
    5662              :     /// keyspace to get all images in one run. Secondly, we use a different image layer generation metrics
    5663              :     /// for metadata keys than relational keys, which is the number of delta files visited during the scan.
    5664              :     #[allow(clippy::too_many_arguments)]
    5665          169 :     async fn create_image_layer_for_metadata_keys(
    5666          169 :         self: &Arc<Self>,
    5667          169 :         partition: &KeySpace,
    5668          169 :         mut image_layer_writer: ImageLayerWriter,
    5669          169 :         lsn: Lsn,
    5670          169 :         ctx: &RequestContext,
    5671          169 :         img_range: Range<Key>,
    5672          169 :         mode: ImageLayerCreationMode,
    5673          169 :         io_concurrency: IoConcurrency,
    5674          169 :     ) -> Result<ImageLayerCreationOutcome, CreateImageLayersError> {
    5675              :         // Metadata keys image layer creation.
    5676          169 :         let mut reconstruct_state = ValuesReconstructState::new(io_concurrency);
    5677          169 :         let begin = Instant::now();
    5678              :         // Directly use `get_vectored_impl` to skip the max_vectored_read_key limit check. Note that the keyspace should
    5679              :         // not contain too many keys, otherwise this takes a lot of memory.
    5680          169 :         let data = self
    5681          169 :             .get_vectored_impl(
    5682          169 :                 VersionedKeySpaceQuery::uniform(partition.clone(), lsn),
    5683          169 :                 &mut reconstruct_state,
    5684          169 :                 ctx,
    5685          169 :             )
    5686          169 :             .await?;
    5687          169 :         let (data, total_kb_retrieved, total_keys_retrieved) = {
    5688          169 :             let mut new_data = BTreeMap::new();
    5689          169 :             let mut total_kb_retrieved = 0;
    5690          169 :             let mut total_keys_retrieved = 0;
    5691         5175 :             for (k, v) in data {
    5692         5006 :                 let v = v?;
    5693         5006 :                 total_kb_retrieved += KEY_SIZE + v.len();
    5694         5006 :                 total_keys_retrieved += 1;
    5695         5006 :                 new_data.insert(k, v);
    5696              :             }
    5697          169 :             (new_data, total_kb_retrieved / 1024, total_keys_retrieved)
    5698              :         };
    5699          169 :         let delta_files_accessed = reconstruct_state.get_delta_layers_visited();
    5700          169 :         let elapsed = begin.elapsed();
    5701              : 
    5702          169 :         let trigger_generation = delta_files_accessed as usize >= MAX_AUX_FILE_V2_DELTAS;
    5703          169 :         info!(
    5704            0 :             "metadata key compaction: trigger_generation={trigger_generation}, delta_files_accessed={delta_files_accessed}, total_kb_retrieved={total_kb_retrieved}, total_keys_retrieved={total_keys_retrieved}, read_time={}s",
    5705            0 :             elapsed.as_secs_f64()
    5706              :         );
    5707              : 
    5708          169 :         if !trigger_generation && mode == ImageLayerCreationMode::Try {
    5709           16 :             return Ok(ImageLayerCreationOutcome::Skip);
    5710          153 :         }
    5711          153 :         if self.cancel.is_cancelled() {
    5712            0 :             return Err(CreateImageLayersError::Cancelled);
    5713          153 :         }
    5714          153 :         let mut wrote_any_image = false;
    5715         5159 :         for (k, v) in data {
    5716         5006 :             if v.is_empty() {
    5717              :                 // the key has been deleted, it does not need an image
    5718              :                 // in metadata keyspace, an empty image == tombstone
    5719            4 :                 continue;
    5720         5002 :             }
    5721         5002 :             wrote_any_image = true;
    5722              : 
    5723              :             // No need to handle sharding b/c metadata keys are always on the 0-th shard.
    5724              : 
    5725              :             // TODO: split image layers to avoid too large layer files. Too large image files are not handled
    5726              :             // on the normal data path either.
    5727         5002 :             image_layer_writer.put_image(k, v, ctx).await?;
    5728              :         }
    5729              : 
    5730          153 :         if wrote_any_image {
    5731              :             // Normal path: we have written some data into the new image layer for this
    5732              :             // partition, so flush it to disk.
    5733            6 :             info!(
    5734            0 :                 "created image layer for metadata {}",
    5735            0 :                 ImageLayerName {
    5736            0 :                     key_range: img_range.clone(),
    5737            0 :                     lsn
    5738            0 :                 }
    5739              :             );
    5740            6 :             Ok(ImageLayerCreationOutcome::Generated {
    5741            6 :                 unfinished_image_layer: image_layer_writer,
    5742            6 :             })
    5743              :         } else {
    5744          147 :             tracing::debug!("no data in range {}-{}", img_range.start, img_range.end);
    5745          147 :             Ok(ImageLayerCreationOutcome::Empty)
    5746              :         }
    5747          169 :     }
    5748              : 
    5749              :     /// Predicate function which indicates whether we should check if new image layers
    5750              :     /// are required. Since checking if new image layers are required is expensive in
    5751              :     /// terms of CPU, we only do it in the following cases:
    5752              :     /// 1. If the timeline has ingested sufficient WAL to justify the cost or ...
    5753              :     /// 2. If enough time has passed since the last check:
    5754              :     ///     1. For large tenants, we wish to perform the check more often since they
    5755              :     ///        suffer from the lack of image layers. Note that we assume sharded tenants
    5756              :     ///        to be large since non-zero shards do not track the logical size.
    5757              :     ///     2. For small tenants (that can mostly fit in RAM), we use a much longer interval
    5758          191 :     fn should_check_if_image_layers_required(self: &Arc<Timeline>, lsn: Lsn) -> bool {
    5759          191 :         let large_timeline_threshold = self.conf.image_layer_generation_large_timeline_threshold;
    5760              : 
    5761          191 :         let last_checks_at = self.last_image_layer_creation_check_at.load();
    5762          191 :         let distance = lsn
    5763          191 :             .checked_sub(last_checks_at)
    5764          191 :             .expect("Attempt to compact with LSN going backwards");
    5765          191 :         let min_distance =
    5766          191 :             self.get_image_layer_creation_check_threshold() as u64 * self.get_checkpoint_distance();
    5767              : 
    5768          191 :         let distance_based_decision = distance.0 >= min_distance;
    5769              : 
    5770          191 :         let mut last_check_instant = self.last_image_layer_creation_check_instant.lock().unwrap();
    5771          191 :         let check_required_after = (|| {
    5772          191 :             if self.shard_identity.is_unsharded() {
    5773          135 :                 if let CurrentLogicalSize::Exact(logical_size) =
    5774          186 :                     self.current_logical_size.current_size()
    5775              :                 {
    5776          135 :                     if Some(Into::<u64>::into(&logical_size)) < large_timeline_threshold {
    5777          135 :                         return Duration::from_secs(3600 * 48);
    5778            0 :                     }
    5779           51 :                 }
    5780            5 :             }
    5781              : 
    5782           56 :             self.get_checkpoint_timeout()
    5783              :         })();
    5784              : 
    5785          191 :         let time_based_decision = match *last_check_instant {
    5786           29 :             Some(last_check) => {
    5787           29 :                 let elapsed = last_check.elapsed();
    5788           29 :                 elapsed >= check_required_after
    5789              :             }
    5790          162 :             None => true,
    5791              :         };
    5792              : 
    5793              :         // Do the expensive delta layer counting only if this timeline has ingested sufficient
    5794              :         // WAL since the last check or a checkpoint timeout interval has elapsed since the last
    5795              :         // check.
    5796          191 :         let decision = distance_based_decision || time_based_decision;
    5797          191 :         tracing::info!(
    5798            0 :             "Decided to check image layers: {}. Distance-based decision: {}, time-based decision: {}",
    5799              :             decision,
    5800              :             distance_based_decision,
    5801              :             time_based_decision
    5802              :         );
    5803          191 :         if decision {
    5804          162 :             self.last_image_layer_creation_check_at.store(lsn);
    5805          162 :             *last_check_instant = Some(Instant::now());
    5806          162 :         }
    5807              : 
    5808          191 :         decision
    5809          191 :     }
    5810              : 
    5811              :     /// Returns the image layers generated and an enum indicating whether the process is fully completed.
    5812              :     /// true = we have generate all image layers, false = we preempt the process for L0 compaction.
    5813              :     ///
    5814              :     /// `partition_mode` is only for logging purpose and is not used anywhere in this function.
    5815              :     #[allow(clippy::too_many_arguments)]
    5816          191 :     async fn create_image_layers(
    5817          191 :         self: &Arc<Timeline>,
    5818          191 :         partitioning: &KeyPartitioning,
    5819          191 :         lsn: Lsn,
    5820          191 :         force_image_creation_lsn: Option<Lsn>,
    5821          191 :         mode: ImageLayerCreationMode,
    5822          191 :         ctx: &RequestContext,
    5823          191 :         last_status: LastImageLayerCreationStatus,
    5824          191 :         yield_for_l0: bool,
    5825          191 :     ) -> Result<(Vec<ResidentLayer>, LastImageLayerCreationStatus), CreateImageLayersError> {
    5826          191 :         let timer = self.metrics.create_images_time_histo.start_timer();
    5827              : 
    5828          191 :         if partitioning.parts.is_empty() {
    5829            0 :             warn!("no partitions to create image layers for");
    5830            0 :             return Ok((vec![], LastImageLayerCreationStatus::Complete));
    5831          191 :         }
    5832              : 
    5833              :         // We need to avoid holes between generated image layers.
    5834              :         // Otherwise LayerMap::image_layer_exists will return false if key range of some layer is covered by more than one
    5835              :         // image layer with hole between them. In this case such layer can not be utilized by GC.
    5836              :         //
    5837              :         // How such hole between partitions can appear?
    5838              :         // if we have relation with relid=1 and size 100 and relation with relid=2 with size 200 then result of
    5839              :         // KeySpace::partition may contain partitions <100000000..100000099> and <200000000..200000199>.
    5840              :         // If there is delta layer <100000000..300000000> then it never be garbage collected because
    5841              :         // image layers  <100000000..100000099> and <200000000..200000199> are not completely covering it.
    5842          191 :         let mut start = Key::MIN;
    5843              : 
    5844          191 :         let check_for_image_layers =
    5845          191 :             if let LastImageLayerCreationStatus::Incomplete { last_key } = last_status {
    5846            0 :                 info!(
    5847            0 :                     "resuming image layer creation: last_status=incomplete, continue from {}",
    5848              :                     last_key
    5849              :                 );
    5850            0 :                 true
    5851              :             } else {
    5852          191 :                 self.should_check_if_image_layers_required(lsn)
    5853              :             };
    5854              : 
    5855          191 :         let mut batch_image_writer = BatchLayerWriter::new(self.conf);
    5856              : 
    5857          191 :         let mut all_generated = true;
    5858              : 
    5859          191 :         let mut partition_processed = 0;
    5860          191 :         let mut total_partitions = partitioning.parts.len();
    5861          191 :         let mut last_partition_processed = None;
    5862          191 :         let mut partition_parts = partitioning.parts.clone();
    5863              : 
    5864          191 :         if let LastImageLayerCreationStatus::Incomplete { last_key } = last_status {
    5865              :             // We need to skip the partitions that have already been processed.
    5866            0 :             let mut found = false;
    5867            0 :             for (i, partition) in partition_parts.iter().enumerate() {
    5868            0 :                 if last_key <= partition.end().unwrap() {
    5869              :                     // ```plain
    5870              :                     // |------|--------|----------|------|
    5871              :                     //              ^last_key
    5872              :                     //                    ^start from this partition
    5873              :                     // ```
    5874              :                     // Why `i+1` instead of `i`?
    5875              :                     // It is possible that the user did some writes after the previous image layer creation attempt so that
    5876              :                     // a relation grows in size, and the last_key is now in the middle of the partition. In this case, we
    5877              :                     // still want to skip this partition, so that we can make progress and avoid generating image layers over
    5878              :                     // the same partition. Doing a mod to ensure we don't end up with an empty vec.
    5879            0 :                     if i + 1 >= total_partitions {
    5880              :                         // In general, this case should not happen -- if last_key is on the last partition, the previous
    5881              :                         // iteration of image layer creation should return a complete status.
    5882            0 :                         break; // with found=false
    5883            0 :                     }
    5884            0 :                     partition_parts = partition_parts.split_off(i + 1); // Remove the first i + 1 elements
    5885            0 :                     total_partitions = partition_parts.len();
    5886              :                     // Update the start key to the partition start.
    5887            0 :                     start = partition_parts[0].start().unwrap();
    5888            0 :                     found = true;
    5889            0 :                     break;
    5890            0 :                 }
    5891              :             }
    5892            0 :             if !found {
    5893              :                 // Last key is within the last partition, or larger than all partitions.
    5894            0 :                 return Ok((vec![], LastImageLayerCreationStatus::Complete));
    5895            0 :             }
    5896          191 :         }
    5897              : 
    5898          191 :         let total = partition_parts.len();
    5899          394 :         for (idx, partition) in partition_parts.iter().enumerate() {
    5900          394 :             if self.cancel.is_cancelled() {
    5901            0 :                 return Err(CreateImageLayersError::Cancelled);
    5902          394 :             }
    5903          394 :             partition_processed += 1;
    5904          394 :             let img_range = start..partition.ranges.last().unwrap().end;
    5905          394 :             let compact_metadata = partition.overlaps(&Key::metadata_key_range());
    5906          394 :             if compact_metadata {
    5907          764 :                 for range in &partition.ranges {
    5908          573 :                     assert!(
    5909          573 :                         range.start.field1 >= METADATA_KEY_BEGIN_PREFIX
    5910          573 :                             && range.end.field1 <= METADATA_KEY_END_PREFIX,
    5911            0 :                         "metadata keys must be partitioned separately"
    5912              :                     );
    5913              :                 }
    5914          191 :                 if mode == ImageLayerCreationMode::Try && !check_for_image_layers {
    5915              :                     // Skip compaction if there are not enough updates. Metadata compaction will do a scan and
    5916              :                     // might mess up with evictions.
    5917           22 :                     start = img_range.end;
    5918           22 :                     continue;
    5919          169 :                 }
    5920              :                 // For initial and force modes, we always generate image layers for metadata keys.
    5921          203 :             } else if let ImageLayerCreationMode::Try = mode {
    5922              :                 // check_for_image_layers = false -> skip
    5923              :                 // check_for_image_layers = true -> check time_for_new_image_layer -> skip/generate
    5924           79 :                 if !check_for_image_layers
    5925           57 :                     || !self
    5926           57 :                         .time_for_new_image_layer(partition, lsn, force_image_creation_lsn)
    5927           57 :                         .await
    5928              :                 {
    5929           79 :                     start = img_range.end;
    5930           79 :                     continue;
    5931            0 :                 }
    5932          124 :             }
    5933          293 :             if let ImageLayerCreationMode::Force = mode {
    5934              :                 // When forced to create image layers, we might try and create them where they already
    5935              :                 // exist.  This mode is only used in tests/debug.
    5936           14 :                 let layers = self.layers.read(LayerManagerLockHolder::Compaction).await;
    5937           14 :                 if layers.contains_key(&PersistentLayerKey {
    5938           14 :                     key_range: img_range.clone(),
    5939           14 :                     lsn_range: PersistentLayerDesc::image_layer_lsn_range(lsn),
    5940           14 :                     is_delta: false,
    5941           14 :                 }) {
    5942              :                     // TODO: this can be processed with the BatchLayerWriter::finish_with_discard
    5943              :                     // in the future.
    5944            0 :                     tracing::info!(
    5945            0 :                         "Skipping image layer at {lsn} {}..{}, already exists",
    5946              :                         img_range.start,
    5947              :                         img_range.end
    5948              :                     );
    5949            0 :                     start = img_range.end;
    5950            0 :                     continue;
    5951           14 :                 }
    5952          279 :             }
    5953              : 
    5954          293 :             let image_layer_writer = ImageLayerWriter::new(
    5955          293 :                 self.conf,
    5956          293 :                 self.timeline_id,
    5957          293 :                 self.tenant_shard_id,
    5958          293 :                 &img_range,
    5959          293 :                 lsn,
    5960          293 :                 &self.gate,
    5961          293 :                 self.cancel.clone(),
    5962          293 :                 ctx,
    5963          293 :             )
    5964          293 :             .await
    5965          293 :             .map_err(CreateImageLayersError::Other)?;
    5966              : 
    5967          293 :             fail_point!("image-layer-writer-fail-before-finish", |_| {
    5968            0 :                 Err(CreateImageLayersError::Other(anyhow::anyhow!(
    5969            0 :                     "failpoint image-layer-writer-fail-before-finish"
    5970            0 :                 )))
    5971            0 :             });
    5972              : 
    5973          293 :             let io_concurrency = IoConcurrency::spawn_from_conf(
    5974          293 :                 self.conf.get_vectored_concurrent_io,
    5975          293 :                 self.gate
    5976          293 :                     .enter()
    5977          293 :                     .map_err(|_| CreateImageLayersError::Cancelled)?,
    5978              :             );
    5979              : 
    5980          293 :             let outcome = if !compact_metadata {
    5981          124 :                 self.create_image_layer_for_rel_blocks(
    5982          124 :                     partition,
    5983          124 :                     image_layer_writer,
    5984          124 :                     lsn,
    5985          124 :                     ctx,
    5986          124 :                     img_range.clone(),
    5987          124 :                     io_concurrency,
    5988          124 :                     Some((idx, total)),
    5989          124 :                 )
    5990          124 :                 .await?
    5991              :             } else {
    5992          169 :                 self.create_image_layer_for_metadata_keys(
    5993          169 :                     partition,
    5994          169 :                     image_layer_writer,
    5995          169 :                     lsn,
    5996          169 :                     ctx,
    5997          169 :                     img_range.clone(),
    5998          169 :                     mode,
    5999          169 :                     io_concurrency,
    6000          169 :                 )
    6001          169 :                 .await?
    6002              :             };
    6003          293 :             match outcome {
    6004          147 :                 ImageLayerCreationOutcome::Empty => {
    6005          147 :                     // No data in this partition, so we don't need to create an image layer (for now).
    6006          147 :                     // The next image layer should cover this key range, so we don't advance the `start`
    6007          147 :                     // key.
    6008          147 :                 }
    6009              :                 ImageLayerCreationOutcome::Generated {
    6010          130 :                     unfinished_image_layer,
    6011          130 :                 } => {
    6012          130 :                     batch_image_writer.add_unfinished_image_writer(
    6013          130 :                         unfinished_image_layer,
    6014          130 :                         img_range.clone(),
    6015          130 :                         lsn,
    6016          130 :                     );
    6017          130 :                     // The next image layer should be generated right after this one.
    6018          130 :                     start = img_range.end;
    6019          130 :                 }
    6020           16 :                 ImageLayerCreationOutcome::Skip => {
    6021           16 :                     // We don't need to create an image layer for this partition.
    6022           16 :                     // The next image layer should NOT cover this range, otherwise
    6023           16 :                     // the keyspace becomes empty (reads don't go past image layers).
    6024           16 :                     start = img_range.end;
    6025           16 :                 }
    6026              :             }
    6027              : 
    6028          293 :             if let ImageLayerCreationMode::Try = mode {
    6029              :                 // We have at least made some progress
    6030           51 :                 if yield_for_l0 && batch_image_writer.pending_layer_num() >= 1 {
    6031              :                     // The `Try` mode is currently only used on the compaction path. We want to avoid
    6032              :                     // image layer generation taking too long time and blocking L0 compaction. So in this
    6033              :                     // mode, we also inspect the current number of L0 layers and skip image layer generation
    6034              :                     // if there are too many of them.
    6035            0 :                     let image_preempt_threshold = self.get_image_creation_preempt_threshold()
    6036            0 :                         * self.get_compaction_threshold();
    6037              :                     // TODO: currently we do not respect `get_image_creation_preempt_threshold` and always yield
    6038              :                     // when there is a single timeline with more than L0 threshold L0 layers. As long as the
    6039              :                     // `get_image_creation_preempt_threshold` is set to a value greater than 0, we will yield for L0 compaction.
    6040            0 :                     if image_preempt_threshold != 0 {
    6041            0 :                         let should_yield = self
    6042            0 :                             .l0_compaction_trigger
    6043            0 :                             .notified()
    6044            0 :                             .now_or_never()
    6045            0 :                             .is_some();
    6046            0 :                         if should_yield {
    6047            0 :                             tracing::info!(
    6048            0 :                                 "preempt image layer generation at {lsn} when processing partition {}..{}: too many L0 layers",
    6049            0 :                                 partition.start().unwrap(),
    6050            0 :                                 partition.end().unwrap()
    6051              :                             );
    6052            0 :                             last_partition_processed = Some(partition.clone());
    6053            0 :                             all_generated = false;
    6054            0 :                             break;
    6055            0 :                         }
    6056            0 :                     }
    6057           51 :                 }
    6058          242 :             }
    6059              :         }
    6060              : 
    6061          191 :         let image_layers = batch_image_writer
    6062          191 :             .finish(self, ctx)
    6063          191 :             .await
    6064          191 :             .map_err(CreateImageLayersError::Other)?;
    6065              : 
    6066          191 :         let mut guard = self.layers.write(LayerManagerLockHolder::Compaction).await;
    6067              : 
    6068              :         // FIXME: we could add the images to be uploaded *before* returning from here, but right
    6069              :         // now they are being scheduled outside of write lock; current way is inconsistent with
    6070              :         // compaction lock order.
    6071          191 :         guard
    6072          191 :             .open_mut()?
    6073          191 :             .track_new_image_layers(&image_layers, &self.metrics);
    6074          191 :         drop_layer_manager_wlock(guard);
    6075          191 :         let duration = timer.stop_and_record();
    6076              : 
    6077              :         // Creating image layers may have caused some previously visible layers to be covered
    6078          191 :         if !image_layers.is_empty() {
    6079          118 :             self.update_layer_visibility().await?;
    6080           73 :         }
    6081              : 
    6082          191 :         let total_layer_size = image_layers
    6083          191 :             .iter()
    6084          191 :             .map(|l| l.metadata().file_size)
    6085          191 :             .sum::<u64>();
    6086              : 
    6087          191 :         if !image_layers.is_empty() {
    6088          118 :             info!(
    6089            0 :                 "created {} image layers ({} bytes) in {}s, processed {} out of {} partitions",
    6090            0 :                 image_layers.len(),
    6091              :                 total_layer_size,
    6092            0 :                 duration.as_secs_f64(),
    6093              :                 partition_processed,
    6094              :                 total_partitions
    6095              :             );
    6096           73 :         }
    6097              : 
    6098              :         Ok((
    6099          191 :             image_layers,
    6100          191 :             if all_generated {
    6101          191 :                 LastImageLayerCreationStatus::Complete
    6102              :             } else {
    6103              :                 LastImageLayerCreationStatus::Incomplete {
    6104            0 :                     last_key: if let Some(last_partition_processed) = last_partition_processed {
    6105            0 :                         last_partition_processed.end().unwrap_or(Key::MIN)
    6106              :                     } else {
    6107              :                         // This branch should be unreachable, but in case it happens, we can just return the start key.
    6108            0 :                         Key::MIN
    6109              :                     },
    6110              :                 }
    6111              :             },
    6112              :         ))
    6113          191 :     }
    6114              : 
    6115              :     /// Wait until the background initial logical size calculation is complete, or
    6116              :     /// this Timeline is shut down.  Calling this function will cause the initial
    6117              :     /// logical size calculation to skip waiting for the background jobs barrier.
    6118            0 :     pub(crate) async fn await_initial_logical_size(self: Arc<Self>) {
    6119            0 :         if !self.shard_identity.is_shard_zero() {
    6120              :             // We don't populate logical size on shard >0: skip waiting for it.
    6121            0 :             return;
    6122            0 :         }
    6123              : 
    6124            0 :         if self.remote_client.is_deleting() {
    6125              :             // The timeline was created in a deletion-resume state, we don't expect logical size to be populated
    6126            0 :             return;
    6127            0 :         }
    6128              : 
    6129            0 :         if self.current_logical_size.current_size().is_exact() {
    6130              :             // root timelines are initialized with exact count, but never start the background
    6131              :             // calculation
    6132            0 :             return;
    6133            0 :         }
    6134              : 
    6135            0 :         if self.cancel.is_cancelled() {
    6136              :             // We already requested stopping the tenant, so we cannot wait for the logical size
    6137              :             // calculation to complete given the task might have been already cancelled.
    6138            0 :             return;
    6139            0 :         }
    6140              : 
    6141            0 :         if let Some(await_bg_cancel) = self
    6142            0 :             .current_logical_size
    6143            0 :             .cancel_wait_for_background_loop_concurrency_limit_semaphore
    6144            0 :             .get()
    6145            0 :         {
    6146            0 :             await_bg_cancel.cancel();
    6147            0 :         } else {
    6148              :             // We should not wait if we were not able to explicitly instruct
    6149              :             // the logical size cancellation to skip the concurrency limit semaphore.
    6150              :             // TODO: this is an unexpected case.  We should restructure so that it
    6151              :             // can't happen.
    6152            0 :             tracing::warn!(
    6153            0 :                 "await_initial_logical_size: can't get semaphore cancel token, skipping"
    6154              :             );
    6155            0 :             debug_assert!(false);
    6156              :         }
    6157              : 
    6158            0 :         tokio::select!(
    6159            0 :             _ = self.current_logical_size.initialized.acquire() => {},
    6160            0 :             _ = self.cancel.cancelled() => {}
    6161              :         )
    6162            0 :     }
    6163              : 
    6164              :     /// Detach this timeline from its ancestor by copying all of ancestors layers as this
    6165              :     /// Timelines layers up to the ancestor_lsn.
    6166              :     ///
    6167              :     /// Requires a timeline that:
    6168              :     /// - has an ancestor to detach from
    6169              :     /// - the ancestor does not have an ancestor -- follows from the original RFC limitations, not
    6170              :     ///   a technical requirement
    6171              :     ///
    6172              :     /// After the operation has been started, it cannot be canceled. Upon restart it needs to be
    6173              :     /// polled again until completion.
    6174              :     ///
    6175              :     /// During the operation all timelines sharing the data with this timeline will be reparented
    6176              :     /// from our ancestor to be branches of this timeline.
    6177            0 :     pub(crate) async fn prepare_to_detach_from_ancestor(
    6178            0 :         self: &Arc<Timeline>,
    6179            0 :         tenant: &crate::tenant::TenantShard,
    6180            0 :         options: detach_ancestor::Options,
    6181            0 :         behavior: DetachBehavior,
    6182            0 :         ctx: &RequestContext,
    6183            0 :     ) -> Result<detach_ancestor::Progress, detach_ancestor::Error> {
    6184            0 :         detach_ancestor::prepare(self, tenant, behavior, options, ctx).await
    6185            0 :     }
    6186              : 
    6187              :     /// Second step of detach from ancestor; detaches the `self` from it's current ancestor and
    6188              :     /// reparents any reparentable children of previous ancestor.
    6189              :     ///
    6190              :     /// This method is to be called while holding the TenantManager's tenant slot, so during this
    6191              :     /// method we cannot be deleted nor can any timeline be deleted. After this method returns
    6192              :     /// successfully, tenant must be reloaded.
    6193              :     ///
    6194              :     /// Final step will be to [`Self::complete_detaching_timeline_ancestor`] after optionally
    6195              :     /// resetting the tenant.
    6196            0 :     pub(crate) async fn detach_from_ancestor_and_reparent(
    6197            0 :         self: &Arc<Timeline>,
    6198            0 :         tenant: &crate::tenant::TenantShard,
    6199            0 :         prepared: detach_ancestor::PreparedTimelineDetach,
    6200            0 :         ancestor_timeline_id: TimelineId,
    6201            0 :         ancestor_lsn: Lsn,
    6202            0 :         behavior: DetachBehavior,
    6203            0 :         ctx: &RequestContext,
    6204            0 :     ) -> Result<detach_ancestor::DetachingAndReparenting, detach_ancestor::Error> {
    6205            0 :         detach_ancestor::detach_and_reparent(
    6206            0 :             self,
    6207            0 :             tenant,
    6208            0 :             prepared,
    6209            0 :             ancestor_timeline_id,
    6210            0 :             ancestor_lsn,
    6211            0 :             behavior,
    6212            0 :             ctx,
    6213            0 :         )
    6214            0 :         .await
    6215            0 :     }
    6216              : 
    6217              :     /// Final step which unblocks the GC.
    6218              :     ///
    6219              :     /// The tenant must've been reset if ancestry was modified previously (in tenant manager).
    6220            0 :     pub(crate) async fn complete_detaching_timeline_ancestor(
    6221            0 :         self: &Arc<Timeline>,
    6222            0 :         tenant: &crate::tenant::TenantShard,
    6223            0 :         attempt: detach_ancestor::Attempt,
    6224            0 :         ctx: &RequestContext,
    6225            0 :     ) -> Result<(), detach_ancestor::Error> {
    6226            0 :         detach_ancestor::complete(self, tenant, attempt, ctx).await
    6227            0 :     }
    6228              : }
    6229              : 
    6230              : impl Drop for Timeline {
    6231            5 :     fn drop(&mut self) {
    6232            5 :         if let Some(ancestor) = &self.ancestor_timeline {
    6233              :             // This lock should never be poisoned, but in case it is we do a .map() instead of
    6234              :             // an unwrap(), to avoid panicking in a destructor and thereby aborting the process.
    6235            2 :             if let Ok(mut gc_info) = ancestor.gc_info.write() {
    6236            2 :                 if !gc_info.remove_child_not_offloaded(self.timeline_id) {
    6237            0 :                     tracing::error!(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id,
    6238            0 :                         "Couldn't remove retain_lsn entry from timeline's parent on drop: already removed");
    6239            2 :                 }
    6240            0 :             }
    6241            3 :         }
    6242            5 :         info!(
    6243            0 :             "Timeline {} for tenant {} is being dropped",
    6244              :             self.timeline_id, self.tenant_shard_id.tenant_id
    6245              :         );
    6246            5 :     }
    6247              : }
    6248              : 
    6249              : pub(crate) use compaction_error::CompactionError;
    6250              : /// In a private mod to enforce that [`CompactionError::is_cancel`] is used
    6251              : /// instead of `match`ing on [`CompactionError::ShuttingDown`].
    6252              : mod compaction_error {
    6253              :     use utils::sync::gate::GateError;
    6254              : 
    6255              :     use crate::{
    6256              :         pgdatadir_mapping::CollectKeySpaceError,
    6257              :         tenant::{PageReconstructError, blob_io::WriteBlobError, upload_queue::NotInitialized},
    6258              :         virtual_file::owned_buffers_io::write::FlushTaskError,
    6259              :     };
    6260              : 
    6261              :     /// Top-level failure to compact. Use [`Self::is_cancel`].
    6262              :     #[derive(Debug, thiserror::Error)]
    6263              :     pub(crate) enum CompactionError {
    6264              :         /// Use [`Self::is_cancel`] instead of checking for this variant.
    6265              :         #[error("The timeline or pageserver is shutting down")]
    6266              :         #[allow(private_interfaces)]
    6267              :         ShuttingDown(ForbidMatching), // private ForbidMatching enforces use of [`Self::is_cancel`].
    6268              :         #[error(transparent)]
    6269              :         Other(anyhow::Error),
    6270              :     }
    6271              : 
    6272              :     #[derive(Debug)]
    6273              :     struct ForbidMatching;
    6274              : 
    6275              :     impl CompactionError {
    6276            0 :         pub fn new_cancelled() -> Self {
    6277            0 :             Self::ShuttingDown(ForbidMatching)
    6278            0 :         }
    6279              :         /// Errors that can be ignored, i.e., cancel and shutdown.
    6280            0 :         pub fn is_cancel(&self) -> bool {
    6281            0 :             let other = match self {
    6282            0 :                 CompactionError::ShuttingDown(_) => return true,
    6283            0 :                 CompactionError::Other(other) => other,
    6284              :             };
    6285              : 
    6286              :             // The write path of compaction in particular often lacks differentiated
    6287              :             // handling errors stemming from cancellation from other errors.
    6288              :             // So, if requested, we also check the ::Other variant by downcasting.
    6289              :             // The list below has been found empirically from flaky tests and production logs.
    6290              :             // The process is simple: on ::Other(), compaction will print the enclosed
    6291              :             // anyhow::Error in debug mode, i.e., with backtrace. That backtrace contains the
    6292              :             // line where the write path / compaction code does undifferentiated error handling
    6293              :             // from a non-anyhow type to an anyhow type. Add the type to the list of downcasts
    6294              :             // below, following the same is_cancel() pattern.
    6295              : 
    6296            0 :             let root_cause = other.root_cause();
    6297              : 
    6298            0 :             let upload_queue = root_cause
    6299            0 :                 .downcast_ref::<NotInitialized>()
    6300            0 :                 .is_some_and(|e| e.is_stopping());
    6301            0 :             let timeline = root_cause
    6302            0 :                 .downcast_ref::<PageReconstructError>()
    6303            0 :                 .is_some_and(|e| e.is_cancel());
    6304            0 :             let buffered_writer_flush_task_canelled = root_cause
    6305            0 :                 .downcast_ref::<FlushTaskError>()
    6306            0 :                 .is_some_and(|e| e.is_cancel());
    6307            0 :             let write_blob_cancelled = root_cause
    6308            0 :                 .downcast_ref::<WriteBlobError>()
    6309            0 :                 .is_some_and(|e| e.is_cancel());
    6310            0 :             let gate_closed = root_cause
    6311            0 :                 .downcast_ref::<GateError>()
    6312            0 :                 .is_some_and(|e| e.is_cancel());
    6313            0 :             upload_queue
    6314            0 :                 || timeline
    6315            0 :                 || buffered_writer_flush_task_canelled
    6316            0 :                 || write_blob_cancelled
    6317            0 :                 || gate_closed
    6318            0 :         }
    6319            0 :         pub fn into_anyhow(self) -> anyhow::Error {
    6320            0 :             match self {
    6321            0 :                 CompactionError::ShuttingDown(ForbidMatching) => anyhow::Error::new(self),
    6322            0 :                 CompactionError::Other(e) => e,
    6323              :             }
    6324            0 :         }
    6325            0 :         pub fn from_collect_keyspace(err: CollectKeySpaceError) -> Self {
    6326            0 :             if err.is_cancel() {
    6327            0 :                 Self::new_cancelled()
    6328              :             } else {
    6329            0 :                 Self::Other(err.into_anyhow())
    6330              :             }
    6331            0 :         }
    6332              :     }
    6333              : }
    6334              : 
    6335              : impl From<super::upload_queue::NotInitialized> for CompactionError {
    6336            0 :     fn from(value: super::upload_queue::NotInitialized) -> Self {
    6337            0 :         match value {
    6338              :             super::upload_queue::NotInitialized::Uninitialized => {
    6339            0 :                 CompactionError::Other(anyhow::anyhow!(value))
    6340              :             }
    6341              :             super::upload_queue::NotInitialized::ShuttingDown
    6342            0 :             | super::upload_queue::NotInitialized::Stopped => CompactionError::new_cancelled(),
    6343              :         }
    6344            0 :     }
    6345              : }
    6346              : 
    6347              : impl From<super::storage_layer::layer::DownloadError> for CompactionError {
    6348            0 :     fn from(e: super::storage_layer::layer::DownloadError) -> Self {
    6349            0 :         match e {
    6350              :             super::storage_layer::layer::DownloadError::TimelineShutdown
    6351              :             | super::storage_layer::layer::DownloadError::DownloadCancelled => {
    6352            0 :                 CompactionError::new_cancelled()
    6353              :             }
    6354              :             super::storage_layer::layer::DownloadError::ContextAndConfigReallyDeniesDownloads
    6355              :             | super::storage_layer::layer::DownloadError::DownloadRequired
    6356              :             | super::storage_layer::layer::DownloadError::NotFile(_)
    6357              :             | super::storage_layer::layer::DownloadError::DownloadFailed
    6358              :             | super::storage_layer::layer::DownloadError::PreStatFailed(_) => {
    6359            0 :                 CompactionError::Other(anyhow::anyhow!(e))
    6360              :             }
    6361              :             #[cfg(test)]
    6362              :             super::storage_layer::layer::DownloadError::Failpoint(_) => {
    6363            0 :                 CompactionError::Other(anyhow::anyhow!(e))
    6364              :             }
    6365              :         }
    6366            0 :     }
    6367              : }
    6368              : 
    6369              : impl From<layer_manager::Shutdown> for CompactionError {
    6370            0 :     fn from(_: layer_manager::Shutdown) -> Self {
    6371            0 :         CompactionError::new_cancelled()
    6372            0 :     }
    6373              : }
    6374              : 
    6375              : impl From<super::storage_layer::errors::PutError> for CompactionError {
    6376            0 :     fn from(e: super::storage_layer::errors::PutError) -> Self {
    6377            0 :         if e.is_cancel() {
    6378            0 :             CompactionError::new_cancelled()
    6379              :         } else {
    6380            0 :             CompactionError::Other(e.into_anyhow())
    6381              :         }
    6382            0 :     }
    6383              : }
    6384              : 
    6385              : #[serde_as]
    6386              : #[derive(serde::Serialize)]
    6387              : struct RecordedDuration(#[serde_as(as = "serde_with::DurationMicroSeconds")] Duration);
    6388              : 
    6389              : #[derive(Default)]
    6390              : enum DurationRecorder {
    6391              :     #[default]
    6392              :     NotStarted,
    6393              :     Recorded(RecordedDuration, tokio::time::Instant),
    6394              : }
    6395              : 
    6396              : impl DurationRecorder {
    6397          115 :     fn till_now(&self) -> DurationRecorder {
    6398          115 :         match self {
    6399              :             DurationRecorder::NotStarted => {
    6400            0 :                 panic!("must only call on recorded measurements")
    6401              :             }
    6402          115 :             DurationRecorder::Recorded(_, ended) => {
    6403          115 :                 let now = tokio::time::Instant::now();
    6404          115 :                 DurationRecorder::Recorded(RecordedDuration(now - *ended), now)
    6405              :             }
    6406              :         }
    6407          115 :     }
    6408          138 :     fn into_recorded(self) -> Option<RecordedDuration> {
    6409          138 :         match self {
    6410            0 :             DurationRecorder::NotStarted => None,
    6411          138 :             DurationRecorder::Recorded(recorded, _) => Some(recorded),
    6412              :         }
    6413          138 :     }
    6414              : }
    6415              : 
    6416              : /// Descriptor for a delta layer used in testing infra. The start/end key/lsn range of the
    6417              : /// delta layer might be different from the min/max key/lsn in the delta layer. Therefore,
    6418              : /// the layer descriptor requires the user to provide the ranges, which should cover all
    6419              : /// keys specified in the `data` field.
    6420              : #[cfg(test)]
    6421              : #[derive(Clone)]
    6422              : pub struct DeltaLayerTestDesc {
    6423              :     pub lsn_range: Range<Lsn>,
    6424              :     pub key_range: Range<Key>,
    6425              :     pub data: Vec<(Key, Lsn, Value)>,
    6426              : }
    6427              : 
    6428              : #[cfg(test)]
    6429              : #[derive(Clone)]
    6430              : pub struct InMemoryLayerTestDesc {
    6431              :     pub lsn_range: Range<Lsn>,
    6432              :     pub data: Vec<(Key, Lsn, Value)>,
    6433              :     pub is_open: bool,
    6434              : }
    6435              : 
    6436              : #[cfg(test)]
    6437              : impl DeltaLayerTestDesc {
    6438            2 :     pub fn new(lsn_range: Range<Lsn>, key_range: Range<Key>, data: Vec<(Key, Lsn, Value)>) -> Self {
    6439            2 :         Self {
    6440            2 :             lsn_range,
    6441            2 :             key_range,
    6442            2 :             data,
    6443            2 :         }
    6444            2 :     }
    6445              : 
    6446           45 :     pub fn new_with_inferred_key_range(
    6447           45 :         lsn_range: Range<Lsn>,
    6448           45 :         data: Vec<(Key, Lsn, Value)>,
    6449           45 :     ) -> Self {
    6450           45 :         let key_min = data.iter().map(|(key, _, _)| key).min().unwrap();
    6451           45 :         let key_max = data.iter().map(|(key, _, _)| key).max().unwrap();
    6452           45 :         Self {
    6453           45 :             key_range: (*key_min)..(key_max.next()),
    6454           45 :             lsn_range,
    6455           45 :             data,
    6456           45 :         }
    6457           45 :     }
    6458              : 
    6459            5 :     pub(crate) fn layer_name(&self) -> LayerName {
    6460            5 :         LayerName::Delta(super::storage_layer::DeltaLayerName {
    6461            5 :             key_range: self.key_range.clone(),
    6462            5 :             lsn_range: self.lsn_range.clone(),
    6463            5 :         })
    6464            5 :     }
    6465              : }
    6466              : 
    6467              : impl Timeline {
    6468           23 :     async fn finish_compact_batch(
    6469           23 :         self: &Arc<Self>,
    6470           23 :         new_deltas: &[ResidentLayer],
    6471           23 :         new_images: &[ResidentLayer],
    6472           23 :         layers_to_remove: &[Layer],
    6473           23 :     ) -> Result<(), CompactionError> {
    6474           23 :         let mut guard = tokio::select! {
    6475           23 :             guard = self.layers.write(LayerManagerLockHolder::Compaction) => guard,
    6476           23 :             _ = self.cancel.cancelled() => {
    6477            0 :                 return Err(CompactionError::new_cancelled());
    6478              :             }
    6479              :         };
    6480              : 
    6481           23 :         let mut duplicated_layers = HashSet::new();
    6482              : 
    6483           23 :         let mut insert_layers = Vec::with_capacity(new_deltas.len());
    6484              : 
    6485          186 :         for l in new_deltas {
    6486          163 :             if guard.contains(l.as_ref()) {
    6487              :                 // expected in tests
    6488            0 :                 tracing::error!(layer=%l, "duplicated L1 layer");
    6489              : 
    6490              :                 // good ways to cause a duplicate: we repeatedly error after taking the writelock
    6491              :                 // `guard`  on self.layers. as of writing this, there are no error returns except
    6492              :                 // for compact_level0_phase1 creating an L0, which does not happen in practice
    6493              :                 // because we have not implemented L0 => L0 compaction.
    6494            0 :                 duplicated_layers.insert(l.layer_desc().key());
    6495          163 :             } else if LayerMap::is_l0(&l.layer_desc().key_range, l.layer_desc().is_delta) {
    6496            0 :                 return Err(CompactionError::Other(anyhow::anyhow!(
    6497            0 :                     "compaction generates a L0 layer file as output, which will cause infinite compaction."
    6498            0 :                 )));
    6499          163 :             } else {
    6500          163 :                 insert_layers.push(l.clone());
    6501          163 :             }
    6502              :         }
    6503              : 
    6504              :         // only remove those inputs which were not outputs
    6505           23 :         let remove_layers: Vec<Layer> = layers_to_remove
    6506           23 :             .iter()
    6507          201 :             .filter(|l| !duplicated_layers.contains(&l.layer_desc().key()))
    6508           23 :             .cloned()
    6509           23 :             .collect();
    6510              : 
    6511           23 :         if !new_images.is_empty() {
    6512            0 :             guard
    6513            0 :                 .open_mut()?
    6514            0 :                 .track_new_image_layers(new_images, &self.metrics);
    6515           23 :         }
    6516              : 
    6517           23 :         guard
    6518           23 :             .open_mut()?
    6519           23 :             .finish_compact_l0(&remove_layers, &insert_layers, &self.metrics);
    6520              : 
    6521           23 :         self.remote_client
    6522           23 :             .schedule_compaction_update(&remove_layers, new_deltas)?;
    6523              : 
    6524           23 :         drop_layer_manager_wlock(guard);
    6525              : 
    6526           23 :         Ok(())
    6527           23 :     }
    6528              : 
    6529            0 :     async fn rewrite_layers(
    6530            0 :         self: &Arc<Self>,
    6531            0 :         mut replace_layers: Vec<(Layer, ResidentLayer)>,
    6532            0 :         mut drop_layers: Vec<Layer>,
    6533            0 :     ) -> Result<(), CompactionError> {
    6534            0 :         let mut guard = self.layers.write(LayerManagerLockHolder::Compaction).await;
    6535              : 
    6536              :         // Trim our lists in case our caller (compaction) raced with someone else (GC) removing layers: we want
    6537              :         // to avoid double-removing, and avoid rewriting something that was removed.
    6538            0 :         replace_layers.retain(|(l, _)| guard.contains(l));
    6539            0 :         drop_layers.retain(|l| guard.contains(l));
    6540              : 
    6541            0 :         guard
    6542            0 :             .open_mut()?
    6543            0 :             .rewrite_layers(&replace_layers, &drop_layers, &self.metrics);
    6544              : 
    6545            0 :         let upload_layers: Vec<_> = replace_layers.into_iter().map(|r| r.1).collect();
    6546              : 
    6547            0 :         self.remote_client
    6548            0 :             .schedule_compaction_update(&drop_layers, &upload_layers)?;
    6549              : 
    6550            0 :         Ok(())
    6551            0 :     }
    6552              : 
    6553              :     /// Schedules the uploads of the given image layers
    6554           80 :     fn upload_new_image_layers(
    6555           80 :         self: &Arc<Self>,
    6556           80 :         new_images: impl IntoIterator<Item = ResidentLayer>,
    6557           80 :     ) -> Result<(), super::upload_queue::NotInitialized> {
    6558           93 :         for layer in new_images {
    6559           13 :             self.remote_client.schedule_layer_file_upload(layer)?;
    6560              :         }
    6561              :         // should any new image layer been created, not uploading index_part will
    6562              :         // result in a mismatch between remote_physical_size and layermap calculated
    6563              :         // size, which will fail some tests, but should not be an issue otherwise.
    6564           80 :         self.remote_client
    6565           80 :             .schedule_index_upload_for_file_changes()?;
    6566           80 :         Ok(())
    6567           80 :     }
    6568              : 
    6569            0 :     async fn find_gc_time_cutoff(
    6570            0 :         &self,
    6571            0 :         now: SystemTime,
    6572            0 :         pitr: Duration,
    6573            0 :         cancel: &CancellationToken,
    6574            0 :         ctx: &RequestContext,
    6575            0 :     ) -> Result<Option<Lsn>, PageReconstructError> {
    6576            0 :         debug_assert_current_span_has_tenant_and_timeline_id();
    6577            0 :         if self.shard_identity.is_shard_zero() {
    6578              :             // Shard Zero has SLRU data and can calculate the PITR time -> LSN mapping itself
    6579            0 :             let time_range = if pitr == Duration::ZERO {
    6580            0 :                 humantime::parse_duration(DEFAULT_PITR_INTERVAL).expect("constant is invalid")
    6581              :             } else {
    6582            0 :                 pitr
    6583              :             };
    6584              : 
    6585              :             // If PITR is so large or `now` is so small that this underflows, we will retain no history (highly unexpected case)
    6586            0 :             let time_cutoff = now.checked_sub(time_range).unwrap_or(now);
    6587            0 :             let timestamp = to_pg_timestamp(time_cutoff);
    6588              : 
    6589            0 :             let time_cutoff = match self.find_lsn_for_timestamp(timestamp, cancel, ctx).await? {
    6590            0 :                 LsnForTimestamp::Present(lsn) => Some(lsn),
    6591            0 :                 LsnForTimestamp::Future(lsn) => {
    6592              :                     // The timestamp is in the future. That sounds impossible,
    6593              :                     // but what it really means is that there hasn't been
    6594              :                     // any commits since the cutoff timestamp.
    6595              :                     //
    6596              :                     // In this case we should use the LSN of the most recent commit,
    6597              :                     // which is implicitly the last LSN in the log.
    6598            0 :                     debug!("future({})", lsn);
    6599            0 :                     Some(self.get_last_record_lsn())
    6600              :                 }
    6601            0 :                 LsnForTimestamp::Past(lsn) => {
    6602            0 :                     debug!("past({})", lsn);
    6603            0 :                     None
    6604              :                 }
    6605            0 :                 LsnForTimestamp::NoData(lsn) => {
    6606            0 :                     debug!("nodata({})", lsn);
    6607            0 :                     None
    6608              :                 }
    6609              :             };
    6610            0 :             Ok(time_cutoff)
    6611              :         } else {
    6612              :             // Shards other than shard zero cannot do timestamp->lsn lookups, and must instead learn their GC cutoff
    6613              :             // from shard zero's index.  The index doesn't explicitly tell us the time cutoff, but we may assume that
    6614              :             // the point up to which shard zero's last_gc_cutoff has advanced will either be the time cutoff, or a
    6615              :             // space cutoff that we would also have respected ourselves.
    6616            0 :             match self
    6617            0 :                 .remote_client
    6618            0 :                 .download_foreign_index(ShardNumber(0), cancel)
    6619            0 :                 .await
    6620              :             {
    6621            0 :                 Ok((index_part, index_generation, _index_mtime)) => {
    6622            0 :                     tracing::info!(
    6623            0 :                         "GC loaded shard zero metadata (gen {index_generation:?}): latest_gc_cutoff_lsn: {}",
    6624            0 :                         index_part.metadata.latest_gc_cutoff_lsn()
    6625              :                     );
    6626            0 :                     Ok(Some(index_part.metadata.latest_gc_cutoff_lsn()))
    6627              :                 }
    6628              :                 Err(DownloadError::NotFound) => {
    6629              :                     // This is unexpected, because during timeline creations shard zero persists to remote
    6630              :                     // storage before other shards are called, and during timeline deletion non-zeroth shards are
    6631              :                     // deleted before the zeroth one.  However, it should be harmless: if we somehow end up in this
    6632              :                     // state, then shard zero should _eventually_ write an index when it GCs.
    6633            0 :                     tracing::warn!("GC couldn't find shard zero's index for timeline");
    6634            0 :                     Ok(None)
    6635              :                 }
    6636            0 :                 Err(e) => {
    6637              :                     // TODO: this function should return a different error type than page reconstruct error
    6638            0 :                     Err(PageReconstructError::Other(anyhow::anyhow!(e)))
    6639              :                 }
    6640              :             }
    6641              : 
    6642              :             // TODO: after reading shard zero's GC cutoff, we should validate its generation with the storage
    6643              :             // controller.  Otherwise, it is possible that we see the GC cutoff go backwards while shard zero
    6644              :             // is going through a migration if we read the old location's index and it has GC'd ahead of the
    6645              :             // new location.  This is legal in principle, but problematic in practice because it might result
    6646              :             // in a timeline creation succeeding on shard zero ('s new location) but then failing on other shards
    6647              :             // because they have GC'd past the branch point.
    6648              :         }
    6649            0 :     }
    6650              : 
    6651              :     /// Find the Lsns above which layer files need to be retained on
    6652              :     /// garbage collection.
    6653              :     ///
    6654              :     /// We calculate two cutoffs, one based on time and one based on WAL size.  `pitr`
    6655              :     /// controls the time cutoff (or ZERO to disable time-based retention), and `space_cutoff` controls
    6656              :     /// the space-based retention.
    6657              :     ///
    6658              :     /// This function doesn't simply to calculate time & space based retention: it treats time-based
    6659              :     /// retention as authoritative if enabled, and falls back to space-based retention if calculating
    6660              :     /// the LSN for a time point isn't possible.  Therefore the GcCutoffs::horizon in the response might
    6661              :     /// be different to the `space_cutoff` input.  Callers should treat the min() of the two cutoffs
    6662              :     /// in the response as the GC cutoff point for the timeline.
    6663              :     #[instrument(skip_all, fields(timeline_id=%self.timeline_id))]
    6664              :     pub(super) async fn find_gc_cutoffs(
    6665              :         &self,
    6666              :         now: SystemTime,
    6667              :         space_cutoff: Lsn,
    6668              :         pitr: Duration,
    6669              :         cancel: &CancellationToken,
    6670              :         ctx: &RequestContext,
    6671              :     ) -> Result<GcCutoffs, PageReconstructError> {
    6672              :         let _timer = self
    6673              :             .metrics
    6674              :             .find_gc_cutoffs_histo
    6675              :             .start_timer()
    6676              :             .record_on_drop();
    6677              : 
    6678              :         pausable_failpoint!("Timeline::find_gc_cutoffs-pausable");
    6679              : 
    6680              :         if cfg!(test) && pitr == Duration::ZERO {
    6681              :             // Unit tests which specify zero PITR interval expect to avoid doing any I/O for timestamp lookup
    6682              :             return Ok(GcCutoffs {
    6683              :                 time: Some(self.get_last_record_lsn()),
    6684              :                 space: space_cutoff,
    6685              :             });
    6686              :         }
    6687              : 
    6688              :         // Calculate a time-based limit on how much to retain:
    6689              :         // - if PITR interval is set, then this is our cutoff.
    6690              :         // - if PITR interval is not set, then we do a lookup
    6691              :         //   based on DEFAULT_PITR_INTERVAL, so that size-based retention does not result in keeping history around permanently on idle databases.
    6692              :         let time_cutoff = self.find_gc_time_cutoff(now, pitr, cancel, ctx).await?;
    6693              : 
    6694              :         Ok(match (pitr, time_cutoff) {
    6695              :             (Duration::ZERO, Some(time_cutoff)) => {
    6696              :                 // PITR is not set. Retain the size-based limit, or the default time retention,
    6697              :                 // whichever requires less data.
    6698              :                 GcCutoffs {
    6699              :                     time: Some(self.get_last_record_lsn()),
    6700              :                     space: std::cmp::max(time_cutoff, space_cutoff),
    6701              :                 }
    6702              :             }
    6703              :             (Duration::ZERO, None) => {
    6704              :                 // PITR is not set, and time lookup failed
    6705              :                 GcCutoffs {
    6706              :                     time: Some(self.get_last_record_lsn()),
    6707              :                     space: space_cutoff,
    6708              :                 }
    6709              :             }
    6710              :             (_, None) => {
    6711              :                 // PITR interval is set & we didn't look up a timestamp successfully.  Conservatively assume PITR
    6712              :                 // cannot advance beyond what was already GC'd, and respect space-based retention
    6713              :                 GcCutoffs {
    6714              :                     time: Some(*self.get_applied_gc_cutoff_lsn()),
    6715              :                     space: space_cutoff,
    6716              :                 }
    6717              :             }
    6718              :             (_, Some(time_cutoff)) => {
    6719              :                 // PITR interval is set and we looked up timestamp successfully.  Ignore
    6720              :                 // size based retention and make time cutoff authoritative
    6721              :                 GcCutoffs {
    6722              :                     time: Some(time_cutoff),
    6723              :                     space: time_cutoff,
    6724              :                 }
    6725              :             }
    6726              :         })
    6727              :     }
    6728              : 
    6729              :     /// Garbage collect layer files on a timeline that are no longer needed.
    6730              :     ///
    6731              :     /// Currently, we don't make any attempt at removing unneeded page versions
    6732              :     /// within a layer file. We can only remove the whole file if it's fully
    6733              :     /// obsolete.
    6734          377 :     pub(super) async fn gc(&self) -> Result<GcResult, GcError> {
    6735              :         // this is most likely the background tasks, but it might be the spawned task from
    6736              :         // immediate_gc
    6737          377 :         let _g = tokio::select! {
    6738          377 :             guard = self.gc_lock.lock() => guard,
    6739          377 :             _ = self.cancel.cancelled() => return Ok(GcResult::default()),
    6740              :         };
    6741          376 :         let timer = self.metrics.garbage_collect_histo.start_timer();
    6742              : 
    6743          376 :         fail_point!("before-timeline-gc");
    6744              : 
    6745              :         // Is the timeline being deleted?
    6746          376 :         if self.is_stopping() {
    6747            0 :             return Err(GcError::TimelineCancelled);
    6748          376 :         }
    6749              : 
    6750          376 :         let (space_cutoff, time_cutoff, retain_lsns, max_lsn_with_valid_lease) = {
    6751          376 :             let gc_info = self.gc_info.read().unwrap();
    6752              : 
    6753          376 :             let space_cutoff = min(gc_info.cutoffs.space, self.get_disk_consistent_lsn());
    6754          376 :             let time_cutoff = gc_info.cutoffs.time;
    6755          376 :             let retain_lsns = gc_info
    6756          376 :                 .retain_lsns
    6757          376 :                 .iter()
    6758          376 :                 .map(|(lsn, _child_id, _is_offloaded)| *lsn)
    6759          376 :                 .collect();
    6760              : 
    6761              :             // Gets the maximum LSN that holds the valid lease.
    6762              :             //
    6763              :             // Caveat: `refresh_gc_info` is in charged of updating the lease map.
    6764              :             // Here, we do not check for stale leases again.
    6765          376 :             let max_lsn_with_valid_lease = gc_info.leases.last_key_value().map(|(lsn, _)| *lsn);
    6766              : 
    6767          376 :             (
    6768          376 :                 space_cutoff,
    6769          376 :                 time_cutoff,
    6770          376 :                 retain_lsns,
    6771          376 :                 max_lsn_with_valid_lease,
    6772          376 :             )
    6773              :         };
    6774              : 
    6775          376 :         let mut new_gc_cutoff = space_cutoff.min(time_cutoff.unwrap_or_default());
    6776          376 :         let standby_horizon = self.standby_horizon.load();
    6777              :         // Hold GC for the standby, but as a safety guard do it only within some
    6778              :         // reasonable lag.
    6779          376 :         if standby_horizon != Lsn::INVALID {
    6780            0 :             if let Some(standby_lag) = new_gc_cutoff.checked_sub(standby_horizon) {
    6781              :                 const MAX_ALLOWED_STANDBY_LAG: u64 = 10u64 << 30; // 10 GB
    6782            0 :                 if standby_lag.0 < MAX_ALLOWED_STANDBY_LAG {
    6783            0 :                     new_gc_cutoff = Lsn::min(standby_horizon, new_gc_cutoff);
    6784            0 :                     trace!("holding off GC for standby apply LSN {}", standby_horizon);
    6785              :                 } else {
    6786            0 :                     warn!(
    6787            0 :                         "standby is lagging for more than {}MB, not holding gc for it",
    6788            0 :                         MAX_ALLOWED_STANDBY_LAG / 1024 / 1024
    6789              :                     )
    6790              :                 }
    6791            0 :             }
    6792          376 :         }
    6793              : 
    6794              :         // Reset standby horizon to ignore it if it is not updated till next GC.
    6795              :         // It is an easy way to unset it when standby disappears without adding
    6796              :         // more conf options.
    6797          376 :         self.standby_horizon.store(Lsn::INVALID);
    6798          376 :         self.metrics
    6799          376 :             .standby_horizon_gauge
    6800          376 :             .set(Lsn::INVALID.0 as i64);
    6801              : 
    6802          376 :         let res = self
    6803          376 :             .gc_timeline(
    6804          376 :                 space_cutoff,
    6805          376 :                 time_cutoff,
    6806          376 :                 retain_lsns,
    6807          376 :                 max_lsn_with_valid_lease,
    6808          376 :                 new_gc_cutoff,
    6809              :             )
    6810          376 :             .instrument(
    6811          376 :                 info_span!("gc_timeline", timeline_id = %self.timeline_id, cutoff = %new_gc_cutoff),
    6812              :             )
    6813          376 :             .await?;
    6814              : 
    6815              :         // only record successes
    6816          376 :         timer.stop_and_record();
    6817              : 
    6818          376 :         Ok(res)
    6819          377 :     }
    6820              : 
    6821          376 :     async fn gc_timeline(
    6822          376 :         &self,
    6823          376 :         space_cutoff: Lsn,
    6824          376 :         time_cutoff: Option<Lsn>, // None if uninitialized
    6825          376 :         retain_lsns: Vec<Lsn>,
    6826          376 :         max_lsn_with_valid_lease: Option<Lsn>,
    6827          376 :         new_gc_cutoff: Lsn,
    6828          376 :     ) -> Result<GcResult, GcError> {
    6829              :         // FIXME: if there is an ongoing detach_from_ancestor, we should just skip gc
    6830              : 
    6831          376 :         let now = SystemTime::now();
    6832          376 :         let mut result: GcResult = GcResult::default();
    6833              : 
    6834              :         // Nothing to GC. Return early.
    6835          376 :         let latest_gc_cutoff = *self.get_applied_gc_cutoff_lsn();
    6836          376 :         if latest_gc_cutoff >= new_gc_cutoff {
    6837           11 :             info!(
    6838            0 :                 "Nothing to GC: new_gc_cutoff_lsn {new_gc_cutoff}, latest_gc_cutoff_lsn {latest_gc_cutoff}",
    6839              :             );
    6840           11 :             return Ok(result);
    6841          365 :         }
    6842              : 
    6843          365 :         let Some(time_cutoff) = time_cutoff else {
    6844              :             // The GC cutoff should have been computed by now, but let's be defensive.
    6845            0 :             info!("Nothing to GC: time_cutoff not yet computed");
    6846            0 :             return Ok(result);
    6847              :         };
    6848              : 
    6849              :         // We need to ensure that no one tries to read page versions or create
    6850              :         // branches at a point before latest_gc_cutoff_lsn. See branch_timeline()
    6851              :         // for details. This will block until the old value is no longer in use.
    6852              :         //
    6853              :         // The GC cutoff should only ever move forwards.
    6854          365 :         let waitlist = {
    6855          365 :             let write_guard = self.applied_gc_cutoff_lsn.lock_for_write();
    6856          365 :             if *write_guard > new_gc_cutoff {
    6857            0 :                 return Err(GcError::BadLsn {
    6858            0 :                     why: format!(
    6859            0 :                         "Cannot move GC cutoff LSN backwards (was {}, new {})",
    6860            0 :                         *write_guard, new_gc_cutoff
    6861            0 :                     ),
    6862            0 :                 });
    6863          365 :             }
    6864              : 
    6865          365 :             write_guard.store_and_unlock(new_gc_cutoff)
    6866              :         };
    6867          365 :         waitlist.wait().await;
    6868              : 
    6869          365 :         info!("GC starting");
    6870              : 
    6871          365 :         debug!("retain_lsns: {:?}", retain_lsns);
    6872              : 
    6873          365 :         let max_retain_lsn = retain_lsns.iter().max();
    6874              : 
    6875              :         // Scan all layers in the timeline (remote or on-disk).
    6876              :         //
    6877              :         // Garbage collect the layer if all conditions are satisfied:
    6878              :         // 1. it is older than cutoff LSN;
    6879              :         // 2. it is older than PITR interval;
    6880              :         // 3. it doesn't need to be retained for 'retain_lsns';
    6881              :         // 4. it does not need to be kept for LSNs holding valid leases.
    6882              :         // 5. newer on-disk image layers cover the layer's whole key range
    6883          365 :         let layers_to_remove = {
    6884          365 :             let mut layers_to_remove = Vec::new();
    6885              : 
    6886          365 :             let guard = self
    6887          365 :                 .layers
    6888          365 :                 .read(LayerManagerLockHolder::GarbageCollection)
    6889          365 :                 .await;
    6890          365 :             let layers = guard.layer_map()?;
    6891         6206 :             'outer: for l in layers.iter_historic_layers() {
    6892         6206 :                 result.layers_total += 1;
    6893              : 
    6894              :                 // 1. Is it newer than GC horizon cutoff point?
    6895         6206 :                 if l.get_lsn_range().end > space_cutoff {
    6896          370 :                     debug!(
    6897            0 :                         "keeping {} because it's newer than space_cutoff {}",
    6898            0 :                         l.layer_name(),
    6899              :                         space_cutoff,
    6900              :                     );
    6901          370 :                     result.layers_needed_by_cutoff += 1;
    6902          370 :                     continue 'outer;
    6903         5836 :                 }
    6904              : 
    6905              :                 // 2. It is newer than PiTR cutoff point?
    6906         5836 :                 if l.get_lsn_range().end > time_cutoff {
    6907            0 :                     debug!(
    6908            0 :                         "keeping {} because it's newer than time_cutoff {}",
    6909            0 :                         l.layer_name(),
    6910              :                         time_cutoff,
    6911              :                     );
    6912            0 :                     result.layers_needed_by_pitr += 1;
    6913            0 :                     continue 'outer;
    6914         5836 :                 }
    6915              : 
    6916              :                 // 3. Is it needed by a child branch?
    6917              :                 // NOTE With that we would keep data that
    6918              :                 // might be referenced by child branches forever.
    6919              :                 // We can track this in child timeline GC and delete parent layers when
    6920              :                 // they are no longer needed. This might be complicated with long inheritance chains.
    6921         5836 :                 if let Some(retain_lsn) = max_retain_lsn {
    6922              :                     // start_lsn is inclusive
    6923            4 :                     if &l.get_lsn_range().start <= retain_lsn {
    6924            4 :                         debug!(
    6925            0 :                             "keeping {} because it's still might be referenced by child branch forked at {} is_dropped: xx is_incremental: {}",
    6926            0 :                             l.layer_name(),
    6927              :                             retain_lsn,
    6928            0 :                             l.is_incremental(),
    6929              :                         );
    6930            4 :                         result.layers_needed_by_branches += 1;
    6931            4 :                         continue 'outer;
    6932            0 :                     }
    6933         5832 :                 }
    6934              : 
    6935              :                 // 4. Is there a valid lease that requires us to keep this layer?
    6936         5832 :                 if let Some(lsn) = &max_lsn_with_valid_lease {
    6937              :                     // keep if layer start <= any of the lease
    6938            9 :                     if &l.get_lsn_range().start <= lsn {
    6939            7 :                         debug!(
    6940            0 :                             "keeping {} because there is a valid lease preventing GC at {}",
    6941            0 :                             l.layer_name(),
    6942              :                             lsn,
    6943              :                         );
    6944            7 :                         result.layers_needed_by_leases += 1;
    6945            7 :                         continue 'outer;
    6946            2 :                     }
    6947         5823 :                 }
    6948              : 
    6949              :                 // 5. Is there a later on-disk layer for this relation?
    6950              :                 //
    6951              :                 // The end-LSN is exclusive, while disk_consistent_lsn is
    6952              :                 // inclusive. For example, if disk_consistent_lsn is 100, it is
    6953              :                 // OK for a delta layer to have end LSN 101, but if the end LSN
    6954              :                 // is 102, then it might not have been fully flushed to disk
    6955              :                 // before crash.
    6956              :                 //
    6957              :                 // For example, imagine that the following layers exist:
    6958              :                 //
    6959              :                 // 1000      - image (A)
    6960              :                 // 1000-2000 - delta (B)
    6961              :                 // 2000      - image (C)
    6962              :                 // 2000-3000 - delta (D)
    6963              :                 // 3000      - image (E)
    6964              :                 //
    6965              :                 // If GC horizon is at 2500, we can remove layers A and B, but
    6966              :                 // we cannot remove C, even though it's older than 2500, because
    6967              :                 // the delta layer 2000-3000 depends on it.
    6968         5825 :                 if !layers
    6969         5825 :                     .image_layer_exists(&l.get_key_range(), &(l.get_lsn_range().end..new_gc_cutoff))
    6970              :                 {
    6971         5821 :                     debug!("keeping {} because it is the latest layer", l.layer_name());
    6972         5821 :                     result.layers_not_updated += 1;
    6973         5821 :                     continue 'outer;
    6974            4 :                 }
    6975              : 
    6976              :                 // We didn't find any reason to keep this file, so remove it.
    6977            4 :                 info!(
    6978            0 :                     "garbage collecting {} is_dropped: xx is_incremental: {}",
    6979            0 :                     l.layer_name(),
    6980            0 :                     l.is_incremental(),
    6981              :                 );
    6982            4 :                 layers_to_remove.push(l);
    6983              :             }
    6984              : 
    6985          365 :             layers_to_remove
    6986              :         };
    6987              : 
    6988          365 :         if !layers_to_remove.is_empty() {
    6989              :             // Persist the new GC cutoff value before we actually remove anything.
    6990              :             // This unconditionally schedules also an index_part.json update, even though, we will
    6991              :             // be doing one a bit later with the unlinked gc'd layers.
    6992            3 :             let disk_consistent_lsn = self.disk_consistent_lsn.load();
    6993            3 :             self.schedule_uploads(disk_consistent_lsn, None)
    6994            3 :                 .map_err(|e| {
    6995            0 :                     if self.cancel.is_cancelled() {
    6996            0 :                         GcError::TimelineCancelled
    6997              :                     } else {
    6998            0 :                         GcError::Remote(e)
    6999              :                     }
    7000            0 :                 })?;
    7001              : 
    7002            3 :             let mut guard = self
    7003            3 :                 .layers
    7004            3 :                 .write(LayerManagerLockHolder::GarbageCollection)
    7005            3 :                 .await;
    7006              : 
    7007            3 :             let gc_layers = layers_to_remove
    7008            3 :                 .iter()
    7009            4 :                 .flat_map(|desc| guard.try_get_from_key(&desc.key()).cloned())
    7010            3 :                 .collect::<Vec<Layer>>();
    7011              : 
    7012            3 :             result.layers_removed = gc_layers.len() as u64;
    7013              : 
    7014            3 :             self.remote_client.schedule_gc_update(&gc_layers)?;
    7015            3 :             guard.open_mut()?.finish_gc_timeline(&gc_layers);
    7016              : 
    7017              :             #[cfg(feature = "testing")]
    7018            3 :             {
    7019            3 :                 result.doomed_layers = gc_layers;
    7020            3 :             }
    7021          362 :         }
    7022              : 
    7023          365 :         info!(
    7024            0 :             "GC completed removing {} layers, cutoff {}",
    7025              :             result.layers_removed, new_gc_cutoff
    7026              :         );
    7027              : 
    7028          365 :         result.elapsed = now.elapsed().unwrap_or(Duration::ZERO);
    7029          365 :         Ok(result)
    7030          376 :     }
    7031              : 
    7032              :     /// Reconstruct a value, using the given base image and WAL records in 'data'.
    7033       364868 :     pub(crate) async fn reconstruct_value(
    7034       364868 :         &self,
    7035       364868 :         key: Key,
    7036       364868 :         request_lsn: Lsn,
    7037       364868 :         mut data: ValueReconstructState,
    7038       364868 :         redo_attempt_type: RedoAttemptType,
    7039       364868 :     ) -> Result<Bytes, PageReconstructError> {
    7040              :         // Perform WAL redo if needed
    7041       364868 :         data.records.reverse();
    7042              : 
    7043       364868 :         let fire_critical_error = match redo_attempt_type {
    7044       363535 :             RedoAttemptType::ReadPage => true,
    7045            0 :             RedoAttemptType::LegacyCompaction => true,
    7046         1333 :             RedoAttemptType::GcCompaction => false,
    7047              :         };
    7048              : 
    7049              :         // If we have a page image, and no WAL, we're all set
    7050       364868 :         if data.records.is_empty() {
    7051       338094 :             if let Some((img_lsn, img)) = &data.img {
    7052       338094 :                 trace!(
    7053            0 :                     "found page image for key {} at {}, no WAL redo required, req LSN {}",
    7054              :                     key, img_lsn, request_lsn,
    7055              :                 );
    7056       338094 :                 Ok(img.clone())
    7057              :             } else {
    7058            0 :                 Err(PageReconstructError::from(anyhow!(
    7059            0 :                     "base image for {key} at {request_lsn} not found"
    7060            0 :                 )))
    7061              :             }
    7062              :         } else {
    7063              :             // We need to do WAL redo.
    7064              :             //
    7065              :             // If we don't have a base image, then the oldest WAL record better initialize
    7066              :             // the page
    7067        26774 :             if data.img.is_none() && !data.records.first().unwrap().1.will_init() {
    7068            0 :                 Err(PageReconstructError::from(anyhow!(
    7069            0 :                     "Base image for {} at {} not found, but got {} WAL records",
    7070            0 :                     key,
    7071            0 :                     request_lsn,
    7072            0 :                     data.records.len()
    7073            0 :                 )))
    7074              :             } else {
    7075        26774 :                 if data.img.is_some() {
    7076        13029 :                     trace!(
    7077            0 :                         "found {} WAL records and a base image for {} at {}, performing WAL redo",
    7078            0 :                         data.records.len(),
    7079              :                         key,
    7080              :                         request_lsn
    7081              :                     );
    7082              :                 } else {
    7083        13745 :                     trace!(
    7084            0 :                         "found {} WAL records that will init the page for {} at {}, performing WAL redo",
    7085            0 :                         data.records.len(),
    7086              :                         key,
    7087              :                         request_lsn
    7088              :                     );
    7089              :                 };
    7090        26774 :                 let res = self
    7091        26774 :                     .walredo_mgr
    7092        26774 :                     .as_ref()
    7093        26774 :                     .context("timeline has no walredo manager")
    7094        26774 :                     .map_err(PageReconstructError::WalRedo)?
    7095        26774 :                     .request_redo(
    7096        26774 :                         key,
    7097        26774 :                         request_lsn,
    7098        26774 :                         data.img,
    7099        26774 :                         data.records,
    7100        26774 :                         self.pg_version,
    7101        26774 :                         redo_attempt_type,
    7102              :                     )
    7103        26774 :                     .await;
    7104        26773 :                 let img = match res {
    7105        26773 :                     Ok(img) => img,
    7106            0 :                     Err(walredo::Error::Cancelled) => return Err(PageReconstructError::Cancelled),
    7107            1 :                     Err(walredo::Error::Other(err)) => {
    7108            1 :                         if fire_critical_error {
    7109            0 :                             critical_timeline!(
    7110            0 :                                 self.tenant_shard_id,
    7111            0 :                                 self.timeline_id,
    7112            0 :                                 "walredo failure during page reconstruction: {err:?}"
    7113              :                             );
    7114            1 :                         }
    7115            1 :                         return Err(PageReconstructError::WalRedo(
    7116            1 :                             err.context("reconstruct a page image"),
    7117            1 :                         ));
    7118              :                     }
    7119              :                 };
    7120        26773 :                 Ok(img)
    7121              :             }
    7122              :         }
    7123       364868 :     }
    7124              : 
    7125            0 :     pub(crate) async fn spawn_download_all_remote_layers(
    7126            0 :         self: Arc<Self>,
    7127            0 :         request: DownloadRemoteLayersTaskSpawnRequest,
    7128            0 :         ctx: &RequestContext,
    7129            0 :     ) -> Result<DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskInfo> {
    7130              :         use pageserver_api::models::DownloadRemoteLayersTaskState;
    7131              : 
    7132              :         // this is not really needed anymore; it has tests which really check the return value from
    7133              :         // http api. it would be better not to maintain this anymore.
    7134              : 
    7135            0 :         let mut status_guard = self.download_all_remote_layers_task_info.write().unwrap();
    7136            0 :         if let Some(st) = &*status_guard {
    7137            0 :             match &st.state {
    7138              :                 DownloadRemoteLayersTaskState::Running => {
    7139            0 :                     return Err(st.clone());
    7140              :                 }
    7141              :                 DownloadRemoteLayersTaskState::ShutDown
    7142            0 :                 | DownloadRemoteLayersTaskState::Completed => {
    7143            0 :                     *status_guard = None;
    7144            0 :                 }
    7145              :             }
    7146            0 :         }
    7147              : 
    7148            0 :         let self_clone = Arc::clone(&self);
    7149            0 :         let task_ctx = ctx.detached_child(
    7150            0 :             TaskKind::DownloadAllRemoteLayers,
    7151            0 :             DownloadBehavior::Download,
    7152              :         );
    7153            0 :         let task_id = task_mgr::spawn(
    7154            0 :             task_mgr::BACKGROUND_RUNTIME.handle(),
    7155            0 :             task_mgr::TaskKind::DownloadAllRemoteLayers,
    7156            0 :             self.tenant_shard_id,
    7157            0 :             Some(self.timeline_id),
    7158            0 :             "download all remote layers task",
    7159            0 :             async move {
    7160            0 :                 self_clone.download_all_remote_layers(request, &task_ctx).await;
    7161            0 :                 let mut status_guard = self_clone.download_all_remote_layers_task_info.write().unwrap();
    7162            0 :                  match &mut *status_guard {
    7163              :                     None => {
    7164            0 :                         warn!("tasks status is supposed to be Some(), since we are running");
    7165              :                     }
    7166            0 :                     Some(st) => {
    7167            0 :                         let exp_task_id = format!("{}", task_mgr::current_task_id().unwrap());
    7168            0 :                         if st.task_id != exp_task_id {
    7169            0 :                             warn!("task id changed while we were still running, expecting {} but have {}", exp_task_id, st.task_id);
    7170            0 :                         } else {
    7171            0 :                             st.state = DownloadRemoteLayersTaskState::Completed;
    7172            0 :                         }
    7173              :                     }
    7174              :                 };
    7175            0 :                 Ok(())
    7176            0 :             }
    7177            0 :             .instrument(info_span!(parent: None, "download_all_remote_layers", tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))
    7178              :         );
    7179              : 
    7180            0 :         let initial_info = DownloadRemoteLayersTaskInfo {
    7181            0 :             task_id: format!("{task_id}"),
    7182            0 :             state: DownloadRemoteLayersTaskState::Running,
    7183            0 :             total_layer_count: 0,
    7184            0 :             successful_download_count: 0,
    7185            0 :             failed_download_count: 0,
    7186            0 :         };
    7187            0 :         *status_guard = Some(initial_info.clone());
    7188              : 
    7189            0 :         Ok(initial_info)
    7190            0 :     }
    7191              : 
    7192            0 :     async fn download_all_remote_layers(
    7193            0 :         self: &Arc<Self>,
    7194            0 :         request: DownloadRemoteLayersTaskSpawnRequest,
    7195            0 :         ctx: &RequestContext,
    7196            0 :     ) {
    7197              :         use pageserver_api::models::DownloadRemoteLayersTaskState;
    7198              : 
    7199            0 :         let remaining = {
    7200            0 :             let guard = self
    7201            0 :                 .layers
    7202            0 :                 .read(LayerManagerLockHolder::GetLayerMapInfo)
    7203            0 :                 .await;
    7204            0 :             let Ok(lm) = guard.layer_map() else {
    7205              :                 // technically here we could look into iterating accessible layers, but downloading
    7206              :                 // all layers of a shutdown timeline makes no sense regardless.
    7207            0 :                 tracing::info!("attempted to download all layers of shutdown timeline");
    7208            0 :                 return;
    7209              :             };
    7210            0 :             lm.iter_historic_layers()
    7211            0 :                 .map(|desc| guard.get_from_desc(&desc))
    7212            0 :                 .collect::<Vec<_>>()
    7213              :         };
    7214            0 :         let total_layer_count = remaining.len();
    7215              : 
    7216              :         macro_rules! lock_status {
    7217              :             ($st:ident) => {
    7218              :                 let mut st = self.download_all_remote_layers_task_info.write().unwrap();
    7219              :                 let st = st
    7220              :                     .as_mut()
    7221              :                     .expect("this function is only called after the task has been spawned");
    7222              :                 assert_eq!(
    7223              :                     st.task_id,
    7224              :                     format!(
    7225              :                         "{}",
    7226              :                         task_mgr::current_task_id().expect("we run inside a task_mgr task")
    7227              :                     )
    7228              :                 );
    7229              :                 let $st = st;
    7230              :             };
    7231              :         }
    7232              : 
    7233              :         {
    7234            0 :             lock_status!(st);
    7235            0 :             st.total_layer_count = total_layer_count as u64;
    7236              :         }
    7237              : 
    7238            0 :         let mut remaining = remaining.into_iter();
    7239            0 :         let mut have_remaining = true;
    7240            0 :         let mut js = tokio::task::JoinSet::new();
    7241              : 
    7242            0 :         let cancel = task_mgr::shutdown_token();
    7243              : 
    7244            0 :         let limit = request.max_concurrent_downloads;
    7245              : 
    7246              :         loop {
    7247            0 :             while js.len() < limit.get() && have_remaining && !cancel.is_cancelled() {
    7248            0 :                 let Some(next) = remaining.next() else {
    7249            0 :                     have_remaining = false;
    7250            0 :                     break;
    7251              :                 };
    7252              : 
    7253            0 :                 let span = tracing::info_span!("download", layer = %next);
    7254              : 
    7255            0 :                 let ctx = ctx.attached_child();
    7256            0 :                 js.spawn(
    7257            0 :                     async move {
    7258            0 :                         let res = next.download(&ctx).await;
    7259            0 :                         (next, res)
    7260            0 :                     }
    7261            0 :                     .instrument(span),
    7262              :                 );
    7263              :             }
    7264              : 
    7265            0 :             while let Some(res) = js.join_next().await {
    7266            0 :                 match res {
    7267              :                     Ok((_, Ok(_))) => {
    7268            0 :                         lock_status!(st);
    7269            0 :                         st.successful_download_count += 1;
    7270              :                     }
    7271            0 :                     Ok((layer, Err(e))) => {
    7272            0 :                         tracing::error!(%layer, "download failed: {e:#}");
    7273            0 :                         lock_status!(st);
    7274            0 :                         st.failed_download_count += 1;
    7275              :                     }
    7276            0 :                     Err(je) if je.is_cancelled() => unreachable!("not used here"),
    7277            0 :                     Err(je) if je.is_panic() => {
    7278            0 :                         lock_status!(st);
    7279            0 :                         st.failed_download_count += 1;
    7280              :                     }
    7281            0 :                     Err(je) => tracing::warn!("unknown joinerror: {je:?}"),
    7282              :                 }
    7283              :             }
    7284              : 
    7285            0 :             if js.is_empty() && (!have_remaining || cancel.is_cancelled()) {
    7286            0 :                 break;
    7287            0 :             }
    7288              :         }
    7289              : 
    7290              :         {
    7291            0 :             lock_status!(st);
    7292            0 :             st.state = DownloadRemoteLayersTaskState::Completed;
    7293              :         }
    7294            0 :     }
    7295              : 
    7296            0 :     pub(crate) fn get_download_all_remote_layers_task_info(
    7297            0 :         &self,
    7298            0 :     ) -> Option<DownloadRemoteLayersTaskInfo> {
    7299            0 :         self.download_all_remote_layers_task_info
    7300            0 :             .read()
    7301            0 :             .unwrap()
    7302            0 :             .clone()
    7303            0 :     }
    7304              : 
    7305              :     /* BEGIN_HADRON */
    7306            0 :     pub(crate) async fn compute_image_consistent_lsn(&self) -> anyhow::Result<Lsn> {
    7307            0 :         let guard = self
    7308            0 :             .layers
    7309            0 :             .read(LayerManagerLockHolder::ComputeImageConsistentLsn)
    7310            0 :             .await;
    7311            0 :         let layer_map = guard.layer_map()?;
    7312            0 :         let disk_consistent_lsn = self.get_disk_consistent_lsn();
    7313              : 
    7314            0 :         Ok(layer_map.compute_image_consistent_lsn(disk_consistent_lsn))
    7315            0 :     }
    7316              :     /* END_HADRON */
    7317              : }
    7318              : 
    7319              : impl Timeline {
    7320              :     /// Returns non-remote layers for eviction.
    7321            0 :     pub(crate) async fn get_local_layers_for_disk_usage_eviction(&self) -> DiskUsageEvictionInfo {
    7322            0 :         let guard = self.layers.read(LayerManagerLockHolder::Eviction).await;
    7323            0 :         let mut max_layer_size: Option<u64> = None;
    7324              : 
    7325            0 :         let resident_layers = guard
    7326            0 :             .likely_resident_layers()
    7327            0 :             .map(|layer| {
    7328            0 :                 let file_size = layer.layer_desc().file_size;
    7329            0 :                 max_layer_size = max_layer_size.map_or(Some(file_size), |m| Some(m.max(file_size)));
    7330              : 
    7331            0 :                 let last_activity_ts = layer.latest_activity();
    7332              : 
    7333            0 :                 EvictionCandidate {
    7334            0 :                     layer: layer.to_owned().into(),
    7335            0 :                     last_activity_ts,
    7336            0 :                     relative_last_activity: finite_f32::FiniteF32::ZERO,
    7337            0 :                     visibility: layer.visibility(),
    7338            0 :                 }
    7339            0 :             })
    7340            0 :             .collect();
    7341              : 
    7342            0 :         DiskUsageEvictionInfo {
    7343            0 :             max_layer_size,
    7344            0 :             resident_layers,
    7345            0 :         }
    7346            0 :     }
    7347              : 
    7348          962 :     pub(crate) fn get_shard_index(&self) -> ShardIndex {
    7349          962 :         ShardIndex {
    7350          962 :             shard_number: self.tenant_shard_id.shard_number,
    7351          962 :             shard_count: self.tenant_shard_id.shard_count,
    7352          962 :         }
    7353          962 :     }
    7354              : 
    7355              :     /// Persistently blocks gc for `Manual` reason.
    7356              :     ///
    7357              :     /// Returns true if no such block existed before, false otherwise.
    7358            0 :     pub(crate) async fn block_gc(&self, tenant: &super::TenantShard) -> anyhow::Result<bool> {
    7359              :         use crate::tenant::remote_timeline_client::index::GcBlockingReason;
    7360            0 :         assert_eq!(self.tenant_shard_id, tenant.tenant_shard_id);
    7361            0 :         tenant.gc_block.insert(self, GcBlockingReason::Manual).await
    7362            0 :     }
    7363              : 
    7364              :     /// Persistently unblocks gc for `Manual` reason.
    7365            0 :     pub(crate) async fn unblock_gc(&self, tenant: &super::TenantShard) -> anyhow::Result<()> {
    7366              :         use crate::tenant::remote_timeline_client::index::GcBlockingReason;
    7367            0 :         assert_eq!(self.tenant_shard_id, tenant.tenant_shard_id);
    7368            0 :         tenant.gc_block.remove(self, GcBlockingReason::Manual).await
    7369            0 :     }
    7370              : 
    7371              :     #[cfg(test)]
    7372           31 :     pub(super) fn force_advance_lsn(self: &Arc<Timeline>, new_lsn: Lsn) {
    7373           31 :         self.last_record_lsn.advance(new_lsn);
    7374           31 :     }
    7375              : 
    7376              :     #[cfg(test)]
    7377            2 :     pub(super) fn force_set_disk_consistent_lsn(&self, new_value: Lsn) {
    7378            2 :         self.disk_consistent_lsn.store(new_value);
    7379            2 :     }
    7380              : 
    7381              :     /// Force create an image layer and place it into the layer map.
    7382              :     ///
    7383              :     /// DO NOT use this function directly. Use [`TenantShard::branch_timeline_test_with_layers`]
    7384              :     /// or [`TenantShard::create_test_timeline_with_layers`] to ensure all these layers are
    7385              :     /// placed into the layer map in one run AND be validated.
    7386              :     #[cfg(test)]
    7387           36 :     pub(super) async fn force_create_image_layer(
    7388           36 :         self: &Arc<Timeline>,
    7389           36 :         lsn: Lsn,
    7390           36 :         mut images: Vec<(Key, Bytes)>,
    7391           36 :         check_start_lsn: Option<Lsn>,
    7392           36 :         ctx: &RequestContext,
    7393           36 :     ) -> anyhow::Result<()> {
    7394           36 :         let last_record_lsn = self.get_last_record_lsn();
    7395           36 :         assert!(
    7396           36 :             lsn <= last_record_lsn,
    7397            0 :             "advance last record lsn before inserting a layer, lsn={lsn}, last_record_lsn={last_record_lsn}"
    7398              :         );
    7399           36 :         if let Some(check_start_lsn) = check_start_lsn {
    7400           36 :             assert!(lsn >= check_start_lsn);
    7401            0 :         }
    7402          240 :         images.sort_unstable_by(|(ka, _), (kb, _)| ka.cmp(kb));
    7403           36 :         let min_key = *images.first().map(|(k, _)| k).unwrap();
    7404           36 :         let end_key = images.last().map(|(k, _)| k).unwrap().next();
    7405           36 :         let mut image_layer_writer = ImageLayerWriter::new(
    7406           36 :             self.conf,
    7407           36 :             self.timeline_id,
    7408           36 :             self.tenant_shard_id,
    7409           36 :             &(min_key..end_key),
    7410           36 :             lsn,
    7411           36 :             &self.gate,
    7412           36 :             self.cancel.clone(),
    7413           36 :             ctx,
    7414           36 :         )
    7415           36 :         .await?;
    7416          312 :         for (key, img) in images {
    7417          276 :             image_layer_writer.put_image(key, img, ctx).await?;
    7418              :         }
    7419           36 :         let (desc, path) = image_layer_writer.finish(ctx).await?;
    7420           36 :         let image_layer = Layer::finish_creating(self.conf, self, desc, &path)?;
    7421           36 :         info!("force created image layer {}", image_layer.local_path());
    7422              :         {
    7423           36 :             let mut guard = self.layers.write(LayerManagerLockHolder::Testing).await;
    7424           36 :             guard
    7425           36 :                 .open_mut()
    7426           36 :                 .unwrap()
    7427           36 :                 .force_insert_layer(image_layer.clone());
    7428              :         }
    7429              : 
    7430              :         // Update remote_timeline_client state to reflect existence of this layer
    7431           36 :         self.remote_client
    7432           36 :             .schedule_layer_file_upload(image_layer)
    7433           36 :             .unwrap();
    7434              : 
    7435           36 :         Ok(())
    7436           36 :     }
    7437              : 
    7438              :     /// Force create a delta layer and place it into the layer map.
    7439              :     ///
    7440              :     /// DO NOT use this function directly. Use [`TenantShard::branch_timeline_test_with_layers`]
    7441              :     /// or [`TenantShard::create_test_timeline_with_layers`] to ensure all these layers are
    7442              :     /// placed into the layer map in one run AND be validated.
    7443              :     #[cfg(test)]
    7444           50 :     pub(super) async fn force_create_delta_layer(
    7445           50 :         self: &Arc<Timeline>,
    7446           50 :         mut deltas: DeltaLayerTestDesc,
    7447           50 :         check_start_lsn: Option<Lsn>,
    7448           50 :         ctx: &RequestContext,
    7449           50 :     ) -> anyhow::Result<()> {
    7450           50 :         let last_record_lsn = self.get_last_record_lsn();
    7451           50 :         deltas
    7452           50 :             .data
    7453       124364 :             .sort_unstable_by(|(ka, la, _), (kb, lb, _)| (ka, la).cmp(&(kb, lb)));
    7454           50 :         assert!(deltas.data.first().unwrap().0 >= deltas.key_range.start);
    7455           50 :         assert!(deltas.data.last().unwrap().0 < deltas.key_range.end);
    7456        10464 :         for (_, lsn, _) in &deltas.data {
    7457        10414 :             assert!(deltas.lsn_range.start <= *lsn && *lsn < deltas.lsn_range.end);
    7458              :         }
    7459           50 :         assert!(
    7460           50 :             deltas.lsn_range.end <= last_record_lsn,
    7461            0 :             "advance last record lsn before inserting a layer, end_lsn={}, last_record_lsn={}",
    7462              :             deltas.lsn_range.end,
    7463              :             last_record_lsn
    7464              :         );
    7465           50 :         if let Some(check_start_lsn) = check_start_lsn {
    7466           50 :             assert!(deltas.lsn_range.start >= check_start_lsn);
    7467            0 :         }
    7468           50 :         let mut delta_layer_writer = DeltaLayerWriter::new(
    7469           50 :             self.conf,
    7470           50 :             self.timeline_id,
    7471           50 :             self.tenant_shard_id,
    7472           50 :             deltas.key_range.start,
    7473           50 :             deltas.lsn_range,
    7474           50 :             &self.gate,
    7475           50 :             self.cancel.clone(),
    7476           50 :             ctx,
    7477           50 :         )
    7478           50 :         .await?;
    7479        10464 :         for (key, lsn, val) in deltas.data {
    7480        10414 :             delta_layer_writer.put_value(key, lsn, val, ctx).await?;
    7481              :         }
    7482           50 :         let (desc, path) = delta_layer_writer.finish(deltas.key_range.end, ctx).await?;
    7483           50 :         let delta_layer = Layer::finish_creating(self.conf, self, desc, &path)?;
    7484           50 :         info!("force created delta layer {}", delta_layer.local_path());
    7485              :         {
    7486           50 :             let mut guard = self.layers.write(LayerManagerLockHolder::Testing).await;
    7487           50 :             guard
    7488           50 :                 .open_mut()
    7489           50 :                 .unwrap()
    7490           50 :                 .force_insert_layer(delta_layer.clone());
    7491              :         }
    7492              : 
    7493              :         // Update remote_timeline_client state to reflect existence of this layer
    7494           50 :         self.remote_client
    7495           50 :             .schedule_layer_file_upload(delta_layer)
    7496           50 :             .unwrap();
    7497              : 
    7498           50 :         Ok(())
    7499           50 :     }
    7500              : 
    7501              :     /// Force create an in-memory layer and place them into the layer map.
    7502              :     #[cfg(test)]
    7503            4 :     pub(super) async fn force_create_in_memory_layer(
    7504            4 :         self: &Arc<Timeline>,
    7505            4 :         mut in_memory: InMemoryLayerTestDesc,
    7506            4 :         check_start_lsn: Option<Lsn>,
    7507            4 :         ctx: &RequestContext,
    7508            4 :     ) -> anyhow::Result<()> {
    7509              :         use utils::bin_ser::BeSer;
    7510              : 
    7511              :         // Validate LSNs
    7512            4 :         if let Some(check_start_lsn) = check_start_lsn {
    7513            4 :             assert!(in_memory.lsn_range.start >= check_start_lsn);
    7514            0 :         }
    7515              : 
    7516            4 :         let last_record_lsn = self.get_last_record_lsn();
    7517            4 :         let layer_end_lsn = if in_memory.is_open {
    7518            1 :             in_memory
    7519            1 :                 .data
    7520            1 :                 .iter()
    7521            1 :                 .map(|(_key, lsn, _value)| lsn)
    7522            1 :                 .max()
    7523            1 :                 .cloned()
    7524              :         } else {
    7525            3 :             Some(in_memory.lsn_range.end)
    7526              :         };
    7527              : 
    7528            4 :         if let Some(end) = layer_end_lsn {
    7529            4 :             assert!(
    7530            4 :                 end <= last_record_lsn,
    7531            0 :                 "advance last record lsn before inserting a layer, end_lsn={end}, last_record_lsn={last_record_lsn}",
    7532              :             );
    7533            0 :         }
    7534              : 
    7535        19820 :         in_memory.data.iter().for_each(|(_key, lsn, _value)| {
    7536        19820 :             assert!(*lsn >= in_memory.lsn_range.start);
    7537        19820 :             assert!(*lsn < in_memory.lsn_range.end);
    7538        19820 :         });
    7539              : 
    7540              :         // Build the batch
    7541            4 :         in_memory
    7542            4 :             .data
    7543       273384 :             .sort_unstable_by(|(ka, la, _), (kb, lb, _)| (ka, la).cmp(&(kb, lb)));
    7544              : 
    7545            4 :         let data = in_memory
    7546            4 :             .data
    7547            4 :             .into_iter()
    7548        19820 :             .map(|(key, lsn, value)| {
    7549        19820 :                 let value_size = value.serialized_size().unwrap() as usize;
    7550        19820 :                 (key.to_compact(), lsn, value_size, value)
    7551        19820 :             })
    7552            4 :             .collect::<Vec<_>>();
    7553              : 
    7554            4 :         let batch = SerializedValueBatch::from_values(data);
    7555              : 
    7556              :         // Create the in-memory layer and write the batch into it
    7557            4 :         let layer = InMemoryLayer::create(
    7558            4 :             self.conf,
    7559            4 :             self.timeline_id,
    7560            4 :             self.tenant_shard_id,
    7561            4 :             in_memory.lsn_range.start,
    7562            4 :             &self.gate,
    7563            4 :             // TODO: if we ever use this function in production code, we need to pass the real cancellation token
    7564            4 :             &CancellationToken::new(),
    7565            4 :             ctx,
    7566            4 :         )
    7567            4 :         .await
    7568            4 :         .unwrap();
    7569              : 
    7570            4 :         layer.put_batch(batch, ctx).await.unwrap();
    7571            4 :         if !in_memory.is_open {
    7572            3 :             layer.freeze(in_memory.lsn_range.end).await;
    7573            1 :         }
    7574              : 
    7575            4 :         info!("force created in-memory layer {:?}", in_memory.lsn_range);
    7576              : 
    7577              :         // Link the layer to the layer map
    7578              :         {
    7579            4 :             let mut guard = self.layers.write(LayerManagerLockHolder::Testing).await;
    7580            4 :             let layer_map = guard.open_mut().unwrap();
    7581            4 :             layer_map.force_insert_in_memory_layer(Arc::new(layer));
    7582              :         }
    7583              : 
    7584            4 :         Ok(())
    7585            4 :     }
    7586              : 
    7587              :     /// Return all keys at the LSN in the image layers
    7588              :     #[cfg(test)]
    7589            3 :     pub(crate) async fn inspect_image_layers(
    7590            3 :         self: &Arc<Timeline>,
    7591            3 :         lsn: Lsn,
    7592            3 :         ctx: &RequestContext,
    7593            3 :         io_concurrency: IoConcurrency,
    7594            3 :     ) -> anyhow::Result<Vec<(Key, Bytes)>> {
    7595            3 :         let mut all_data = Vec::new();
    7596            3 :         let guard = self.layers.read(LayerManagerLockHolder::Testing).await;
    7597           17 :         for layer in guard.layer_map()?.iter_historic_layers() {
    7598           17 :             if !layer.is_delta() && layer.image_layer_lsn() == lsn {
    7599            4 :                 let layer = guard.get_from_desc(&layer);
    7600            4 :                 let mut reconstruct_data = ValuesReconstructState::new(io_concurrency.clone());
    7601            4 :                 layer
    7602            4 :                     .get_values_reconstruct_data(
    7603            4 :                         KeySpace::single(Key::MIN..Key::MAX),
    7604            4 :                         lsn..Lsn(lsn.0 + 1),
    7605            4 :                         &mut reconstruct_data,
    7606            4 :                         ctx,
    7607            4 :                     )
    7608            4 :                     .await?;
    7609           33 :                 for (k, v) in std::mem::take(&mut reconstruct_data.keys) {
    7610           33 :                     let v = v.collect_pending_ios().await?;
    7611           33 :                     all_data.push((k, v.img.unwrap().1));
    7612              :                 }
    7613           13 :             }
    7614              :         }
    7615            3 :         all_data.sort();
    7616            3 :         Ok(all_data)
    7617            3 :     }
    7618              : 
    7619              :     /// Get all historic layer descriptors in the layer map
    7620              :     #[cfg(test)]
    7621           12 :     pub(crate) async fn inspect_historic_layers(
    7622           12 :         self: &Arc<Timeline>,
    7623           12 :     ) -> anyhow::Result<Vec<super::storage_layer::PersistentLayerKey>> {
    7624           12 :         let mut layers = Vec::new();
    7625           12 :         let guard = self.layers.read(LayerManagerLockHolder::Testing).await;
    7626           57 :         for layer in guard.layer_map()?.iter_historic_layers() {
    7627           57 :             layers.push(layer.key());
    7628           57 :         }
    7629           12 :         Ok(layers)
    7630           12 :     }
    7631              : 
    7632              :     #[cfg(test)]
    7633            5 :     pub(crate) fn add_extra_test_dense_keyspace(&self, ks: KeySpace) {
    7634            5 :         let mut keyspace = self.extra_test_dense_keyspace.load().as_ref().clone();
    7635            5 :         keyspace.merge(&ks);
    7636            5 :         self.extra_test_dense_keyspace.store(Arc::new(keyspace));
    7637            5 :     }
    7638              : }
    7639              : 
    7640              : /// Tracking writes ingestion does to a particular in-memory layer.
    7641              : ///
    7642              : /// Cleared upon freezing a layer.
    7643              : pub(crate) struct TimelineWriterState {
    7644              :     open_layer: Arc<InMemoryLayer>,
    7645              :     current_size: u64,
    7646              :     // Previous Lsn which passed through
    7647              :     prev_lsn: Option<Lsn>,
    7648              :     // Largest Lsn which passed through the current writer
    7649              :     max_lsn: Option<Lsn>,
    7650              :     // Cached details of the last freeze. Avoids going trough the atomic/lock on every put.
    7651              :     cached_last_freeze_at: Lsn,
    7652              : }
    7653              : 
    7654              : impl TimelineWriterState {
    7655          660 :     fn new(open_layer: Arc<InMemoryLayer>, current_size: u64, last_freeze_at: Lsn) -> Self {
    7656          660 :         Self {
    7657          660 :             open_layer,
    7658          660 :             current_size,
    7659          660 :             prev_lsn: None,
    7660          660 :             max_lsn: None,
    7661          660 :             cached_last_freeze_at: last_freeze_at,
    7662          660 :         }
    7663          660 :     }
    7664              : }
    7665              : 
    7666              : /// Various functions to mutate the timeline.
    7667              : // TODO Currently, Deref is used to allow easy access to read methods from this trait.
    7668              : // This is probably considered a bad practice in Rust and should be fixed eventually,
    7669              : // but will cause large code changes.
    7670              : pub(crate) struct TimelineWriter<'a> {
    7671              :     tl: &'a Timeline,
    7672              :     write_guard: tokio::sync::MutexGuard<'a, Option<TimelineWriterState>>,
    7673              : }
    7674              : 
    7675              : impl Deref for TimelineWriter<'_> {
    7676              :     type Target = Timeline;
    7677              : 
    7678      4949226 :     fn deref(&self) -> &Self::Target {
    7679      4949226 :         self.tl
    7680      4949226 :     }
    7681              : }
    7682              : 
    7683              : #[derive(PartialEq)]
    7684              : enum OpenLayerAction {
    7685              :     Roll,
    7686              :     Open,
    7687              :     None,
    7688              : }
    7689              : 
    7690              : impl TimelineWriter<'_> {
    7691      2402129 :     async fn handle_open_layer_action(
    7692      2402129 :         &mut self,
    7693      2402129 :         at: Lsn,
    7694      2402129 :         action: OpenLayerAction,
    7695      2402129 :         ctx: &RequestContext,
    7696      2402129 :     ) -> anyhow::Result<&Arc<InMemoryLayer>> {
    7697      2402129 :         match action {
    7698              :             OpenLayerAction::Roll => {
    7699           40 :                 let freeze_at = self.write_guard.as_ref().unwrap().max_lsn.unwrap();
    7700           40 :                 self.roll_layer(freeze_at).await?;
    7701           40 :                 self.open_layer(at, ctx).await?;
    7702              :             }
    7703          620 :             OpenLayerAction::Open => self.open_layer(at, ctx).await?,
    7704              :             OpenLayerAction::None => {
    7705      2401469 :                 assert!(self.write_guard.is_some());
    7706              :             }
    7707              :         }
    7708              : 
    7709      2402129 :         Ok(&self.write_guard.as_ref().unwrap().open_layer)
    7710      2402129 :     }
    7711              : 
    7712          660 :     async fn open_layer(&mut self, at: Lsn, ctx: &RequestContext) -> anyhow::Result<()> {
    7713          660 :         let layer = self
    7714          660 :             .tl
    7715          660 :             .get_layer_for_write(at, &self.write_guard, ctx)
    7716          660 :             .await?;
    7717          660 :         let initial_size = layer.len();
    7718              : 
    7719          660 :         let last_freeze_at = self.last_freeze_at.load();
    7720          660 :         self.write_guard.replace(TimelineWriterState::new(
    7721          660 :             layer,
    7722          660 :             initial_size,
    7723          660 :             last_freeze_at,
    7724          660 :         ));
    7725              : 
    7726          660 :         Ok(())
    7727          660 :     }
    7728              : 
    7729           40 :     async fn roll_layer(&mut self, freeze_at: Lsn) -> Result<(), FlushLayerError> {
    7730           40 :         let current_size = self.write_guard.as_ref().unwrap().current_size;
    7731              : 
    7732              :         // If layer flushes are backpressured due to compaction not keeping up, wait for the flush
    7733              :         // to propagate the backpressure up into WAL ingestion.
    7734           40 :         let l0_count = self
    7735           40 :             .tl
    7736           40 :             .layers
    7737           40 :             .read(LayerManagerLockHolder::GetLayerMapInfo)
    7738           40 :             .await
    7739           40 :             .layer_map()?
    7740           40 :             .level0_deltas()
    7741           40 :             .len();
    7742           40 :         let wait_thresholds = [
    7743           40 :             self.get_l0_flush_delay_threshold(),
    7744           40 :             self.get_l0_flush_stall_threshold(),
    7745           40 :         ];
    7746           40 :         let wait_threshold = wait_thresholds.into_iter().flatten().min();
    7747              : 
    7748              :         // self.write_guard will be taken by the freezing
    7749           40 :         let flush_id = self
    7750           40 :             .tl
    7751           40 :             .freeze_inmem_layer_at(freeze_at, &mut self.write_guard)
    7752           40 :             .await?;
    7753              : 
    7754           40 :         assert!(self.write_guard.is_none());
    7755              : 
    7756           40 :         if let Some(wait_threshold) = wait_threshold {
    7757            0 :             if l0_count >= wait_threshold {
    7758            0 :                 debug!(
    7759            0 :                     "layer roll waiting for flush due to compaction backpressure at {l0_count} L0 layers"
    7760              :                 );
    7761            0 :                 self.tl.wait_flush_completion(flush_id).await?;
    7762            0 :             }
    7763           40 :         }
    7764              : 
    7765           40 :         if current_size >= self.get_checkpoint_distance() * 2 {
    7766            0 :             warn!("Flushed oversized open layer with size {}", current_size)
    7767           40 :         }
    7768              : 
    7769           40 :         Ok(())
    7770           40 :     }
    7771              : 
    7772      2402129 :     fn get_open_layer_action(&self, lsn: Lsn, new_value_size: u64) -> OpenLayerAction {
    7773      2402129 :         let state = &*self.write_guard;
    7774      2402129 :         let Some(state) = &state else {
    7775          620 :             return OpenLayerAction::Open;
    7776              :         };
    7777              : 
    7778              :         #[cfg(feature = "testing")]
    7779      2401509 :         if state.cached_last_freeze_at < self.tl.last_freeze_at.load() {
    7780              :             // this check and assertion are not really needed because
    7781              :             // LayerManager::try_freeze_in_memory_layer will always clear out the
    7782              :             // TimelineWriterState if something is frozen. however, we can advance last_freeze_at when there
    7783              :             // is no TimelineWriterState.
    7784            0 :             assert!(
    7785            0 :                 state.open_layer.end_lsn.get().is_some(),
    7786            0 :                 "our open_layer must be outdated"
    7787              :             );
    7788              : 
    7789              :             // this would be a memory leak waiting to happen because the in-memory layer always has
    7790              :             // an index
    7791            0 :             panic!("BUG: TimelineWriterState held on to frozen in-memory layer.");
    7792      2401509 :         }
    7793              : 
    7794      2401509 :         if state.prev_lsn == Some(lsn) {
    7795              :             // Rolling mid LSN is not supported by [downstream code].
    7796              :             // Hence, only roll at LSN boundaries.
    7797              :             //
    7798              :             // [downstream code]: https://github.com/neondatabase/neon/pull/7993#discussion_r1633345422
    7799            3 :             return OpenLayerAction::None;
    7800      2401506 :         }
    7801              : 
    7802      2401506 :         if state.current_size == 0 {
    7803              :             // Don't roll empty layers
    7804            0 :             return OpenLayerAction::None;
    7805      2401506 :         }
    7806              : 
    7807      2401506 :         if self.tl.should_roll(
    7808      2401506 :             state.current_size,
    7809      2401506 :             state.current_size + new_value_size,
    7810      2401506 :             self.get_checkpoint_distance(),
    7811      2401506 :             lsn,
    7812      2401506 :             state.cached_last_freeze_at,
    7813      2401506 :             state.open_layer.get_opened_at(),
    7814              :         ) {
    7815           40 :             OpenLayerAction::Roll
    7816              :         } else {
    7817      2401466 :             OpenLayerAction::None
    7818              :         }
    7819      2402129 :     }
    7820              : 
    7821              :     /// Put a batch of keys at the specified Lsns.
    7822      2402128 :     pub(crate) async fn put_batch(
    7823      2402128 :         &mut self,
    7824      2402128 :         batch: SerializedValueBatch,
    7825      2402128 :         ctx: &RequestContext,
    7826      2402128 :     ) -> anyhow::Result<()> {
    7827      2402128 :         if !batch.has_data() {
    7828            0 :             return Ok(());
    7829      2402128 :         }
    7830              : 
    7831              :         // In debug builds, assert that we don't write any keys that don't belong to this shard.
    7832              :         // We don't assert this in release builds, since key ownership policies may change over
    7833              :         // time. Stray keys will be removed during compaction.
    7834      2402128 :         if cfg!(debug_assertions) {
    7835      4947529 :             for metadata in &batch.metadata {
    7836      2545401 :                 if let ValueMeta::Serialized(metadata) = metadata {
    7837      2545401 :                     let key = Key::from_compact(metadata.key);
    7838      2545401 :                     assert!(
    7839      2545401 :                         self.shard_identity.is_key_local(&key)
    7840           12 :                             || self.shard_identity.is_key_global(&key),
    7841            0 :                         "key {key} does not belong on shard {}",
    7842            0 :                         self.shard_identity.shard_index()
    7843              :                     );
    7844            0 :                 }
    7845              :             }
    7846            0 :         }
    7847              : 
    7848      2402128 :         let batch_max_lsn = batch.max_lsn;
    7849      2402128 :         let buf_size: u64 = batch.buffer_size() as u64;
    7850              : 
    7851      2402128 :         let action = self.get_open_layer_action(batch_max_lsn, buf_size);
    7852      2402128 :         let layer = self
    7853      2402128 :             .handle_open_layer_action(batch_max_lsn, action, ctx)
    7854      2402128 :             .await?;
    7855              : 
    7856      2402128 :         let res = layer.put_batch(batch, ctx).await;
    7857              : 
    7858      2402128 :         if res.is_ok() {
    7859      2402128 :             // Update the current size only when the entire write was ok.
    7860      2402128 :             // In case of failures, we may have had partial writes which
    7861      2402128 :             // render the size tracking out of sync. That's ok because
    7862      2402128 :             // the checkpoint distance should be significantly smaller
    7863      2402128 :             // than the S3 single shot upload limit of 5GiB.
    7864      2402128 :             let state = self.write_guard.as_mut().unwrap();
    7865      2402128 : 
    7866      2402128 :             state.current_size += buf_size;
    7867      2402128 :             state.prev_lsn = Some(batch_max_lsn);
    7868      2402128 :             state.max_lsn = std::cmp::max(state.max_lsn, Some(batch_max_lsn));
    7869      2402128 :         }
    7870              : 
    7871      2402128 :         res
    7872      2402128 :     }
    7873              : 
    7874              :     #[cfg(test)]
    7875              :     /// Test helper, for tests that would like to poke individual values without composing a batch
    7876      2195079 :     pub(crate) async fn put(
    7877      2195079 :         &mut self,
    7878      2195079 :         key: Key,
    7879      2195079 :         lsn: Lsn,
    7880      2195079 :         value: &Value,
    7881      2195079 :         ctx: &RequestContext,
    7882      2195079 :     ) -> anyhow::Result<()> {
    7883              :         use utils::bin_ser::BeSer;
    7884      2195079 :         if !key.is_valid_key_on_write_path() {
    7885            0 :             bail!(
    7886            0 :                 "the request contains data not supported by pageserver at TimelineWriter::put: {}",
    7887              :                 key
    7888              :             );
    7889      2195079 :         }
    7890      2195079 :         let val_ser_size = value.serialized_size().unwrap() as usize;
    7891      2195079 :         let batch = SerializedValueBatch::from_values(vec![(
    7892      2195079 :             key.to_compact(),
    7893      2195079 :             lsn,
    7894      2195079 :             val_ser_size,
    7895      2195079 :             value.clone(),
    7896      2195079 :         )]);
    7897              : 
    7898      2195079 :         self.put_batch(batch, ctx).await
    7899      2195079 :     }
    7900              : 
    7901            1 :     pub(crate) async fn delete_batch(
    7902            1 :         &mut self,
    7903            1 :         batch: &[(Range<Key>, Lsn)],
    7904            1 :         ctx: &RequestContext,
    7905            1 :     ) -> anyhow::Result<()> {
    7906            1 :         if let Some((_, lsn)) = batch.first() {
    7907            1 :             let action = self.get_open_layer_action(*lsn, 0);
    7908            1 :             let layer = self.handle_open_layer_action(*lsn, action, ctx).await?;
    7909            1 :             layer.put_tombstones(batch).await?;
    7910            0 :         }
    7911              : 
    7912            1 :         Ok(())
    7913            1 :     }
    7914              : 
    7915              :     /// Track the end of the latest digested WAL record.
    7916              :     /// Remember the (end of) last valid WAL record remembered in the timeline.
    7917              :     ///
    7918              :     /// Call this after you have finished writing all the WAL up to 'lsn'.
    7919              :     ///
    7920              :     /// 'lsn' must be aligned. This wakes up any wait_lsn() callers waiting for
    7921              :     /// the 'lsn' or anything older. The previous last record LSN is stored alongside
    7922              :     /// the latest and can be read.
    7923      2639560 :     pub(crate) fn finish_write(&self, new_lsn: Lsn) {
    7924      2639560 :         self.tl.finish_write(new_lsn);
    7925      2639560 :     }
    7926              : 
    7927       135285 :     pub(crate) fn update_current_logical_size(&self, delta: i64) {
    7928       135285 :         self.tl.update_current_logical_size(delta)
    7929       135285 :     }
    7930              : }
    7931              : 
    7932              : // We need TimelineWriter to be send in upcoming conversion of
    7933              : // Timeline::layers to tokio::sync::RwLock.
    7934              : #[test]
    7935            1 : fn is_send() {
    7936            1 :     fn _assert_send<T: Send>() {}
    7937            1 :     _assert_send::<TimelineWriter<'_>>();
    7938            1 : }
    7939              : 
    7940              : #[cfg(test)]
    7941              : mod tests {
    7942              :     use std::sync::Arc;
    7943              : 
    7944              :     use pageserver_api::key::Key;
    7945              :     use postgres_ffi::PgMajorVersion;
    7946              :     use std::iter::Iterator;
    7947              :     use tracing::Instrument;
    7948              :     use utils::id::TimelineId;
    7949              :     use utils::lsn::Lsn;
    7950              :     use wal_decoder::models::value::Value;
    7951              : 
    7952              :     use super::HeatMapTimeline;
    7953              :     use crate::context::RequestContextBuilder;
    7954              :     use crate::tenant::harness::{TenantHarness, test_img};
    7955              :     use crate::tenant::layer_map::LayerMap;
    7956              :     use crate::tenant::storage_layer::{Layer, LayerName, LayerVisibilityHint};
    7957              :     use crate::tenant::timeline::layer_manager::LayerManagerLockHolder;
    7958              :     use crate::tenant::timeline::{DeltaLayerTestDesc, EvictionError};
    7959              :     use crate::tenant::{PreviousHeatmap, Timeline};
    7960              : 
    7961            5 :     fn assert_heatmaps_have_same_layers(lhs: &HeatMapTimeline, rhs: &HeatMapTimeline) {
    7962            5 :         assert_eq!(lhs.all_layers().count(), rhs.all_layers().count());
    7963            5 :         let lhs_rhs = lhs.all_layers().zip(rhs.all_layers());
    7964           25 :         for (l, r) in lhs_rhs {
    7965           20 :             assert_eq!(l.name, r.name);
    7966           20 :             assert_eq!(l.metadata, r.metadata);
    7967              :         }
    7968            5 :     }
    7969              : 
    7970              :     #[tokio::test]
    7971            1 :     async fn test_heatmap_generation() {
    7972            1 :         let harness = TenantHarness::create("heatmap_generation").await.unwrap();
    7973              : 
    7974            1 :         let covered_delta = DeltaLayerTestDesc::new_with_inferred_key_range(
    7975            1 :             Lsn(0x10)..Lsn(0x20),
    7976            1 :             vec![(
    7977            1 :                 Key::from_hex("620000000033333333444444445500000000").unwrap(),
    7978            1 :                 Lsn(0x11),
    7979            1 :                 Value::Image(test_img("foo")),
    7980            1 :             )],
    7981              :         );
    7982            1 :         let visible_delta = DeltaLayerTestDesc::new_with_inferred_key_range(
    7983            1 :             Lsn(0x10)..Lsn(0x20),
    7984            1 :             vec![(
    7985            1 :                 Key::from_hex("720000000033333333444444445500000000").unwrap(),
    7986            1 :                 Lsn(0x11),
    7987            1 :                 Value::Image(test_img("foo")),
    7988            1 :             )],
    7989              :         );
    7990            1 :         let l0_delta = DeltaLayerTestDesc::new(
    7991            1 :             Lsn(0x20)..Lsn(0x30),
    7992            1 :             Key::from_hex("000000000000000000000000000000000000").unwrap()
    7993            1 :                 ..Key::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(),
    7994            1 :             vec![(
    7995            1 :                 Key::from_hex("720000000033333333444444445500000000").unwrap(),
    7996            1 :                 Lsn(0x25),
    7997            1 :                 Value::Image(test_img("foo")),
    7998            1 :             )],
    7999              :         );
    8000            1 :         let delta_layers = vec![
    8001            1 :             covered_delta.clone(),
    8002            1 :             visible_delta.clone(),
    8003            1 :             l0_delta.clone(),
    8004              :         ];
    8005              : 
    8006            1 :         let image_layer = (
    8007            1 :             Lsn(0x40),
    8008            1 :             vec![(
    8009            1 :                 Key::from_hex("620000000033333333444444445500000000").unwrap(),
    8010            1 :                 test_img("bar"),
    8011            1 :             )],
    8012            1 :         );
    8013            1 :         let image_layers = vec![image_layer];
    8014              : 
    8015            1 :         let (tenant, ctx) = harness.load().await;
    8016            1 :         let timeline = tenant
    8017            1 :             .create_test_timeline_with_layers(
    8018            1 :                 TimelineId::generate(),
    8019            1 :                 Lsn(0x10),
    8020            1 :                 PgMajorVersion::PG14,
    8021            1 :                 &ctx,
    8022            1 :                 Vec::new(), // in-memory layers
    8023            1 :                 delta_layers,
    8024            1 :                 image_layers,
    8025            1 :                 Lsn(0x100),
    8026            1 :             )
    8027            1 :             .await
    8028            1 :             .unwrap();
    8029            1 :         let ctx = &ctx.with_scope_timeline(&timeline);
    8030              : 
    8031              :         // Layer visibility is an input to heatmap generation, so refresh it first
    8032            1 :         timeline.update_layer_visibility().await.unwrap();
    8033              : 
    8034            1 :         let heatmap = timeline
    8035            1 :             .generate_heatmap()
    8036            1 :             .await
    8037            1 :             .expect("Infallible while timeline is not shut down");
    8038              : 
    8039            1 :         assert_eq!(heatmap.timeline_id, timeline.timeline_id);
    8040              : 
    8041              :         // L0 should come last
    8042            1 :         let heatmap_layers = heatmap.all_layers().collect::<Vec<_>>();
    8043            1 :         assert_eq!(heatmap_layers.last().unwrap().name, l0_delta.layer_name());
    8044              : 
    8045            1 :         let mut last_lsn = Lsn::MAX;
    8046            5 :         for layer in heatmap_layers {
    8047              :             // Covered layer should be omitted
    8048            4 :             assert!(layer.name != covered_delta.layer_name());
    8049              : 
    8050            4 :             let layer_lsn = match &layer.name {
    8051            2 :                 LayerName::Delta(d) => d.lsn_range.end,
    8052            2 :                 LayerName::Image(i) => i.lsn,
    8053              :             };
    8054              : 
    8055              :             // Apart from L0s, newest Layers should come first
    8056            4 :             if !LayerMap::is_l0(layer.name.key_range(), layer.name.is_delta()) {
    8057            3 :                 assert!(layer_lsn <= last_lsn);
    8058            3 :                 last_lsn = layer_lsn;
    8059            1 :             }
    8060              :         }
    8061              : 
    8062              :         // Evict all the layers and stash the old heatmap in the timeline.
    8063              :         // This simulates a migration to a cold secondary location.
    8064              : 
    8065            1 :         let guard = timeline.layers.read(LayerManagerLockHolder::Testing).await;
    8066            1 :         let mut all_layers = Vec::new();
    8067            1 :         let forever = std::time::Duration::from_secs(120);
    8068            5 :         for layer in guard.likely_resident_layers() {
    8069            5 :             all_layers.push(layer.clone());
    8070            5 :             layer.evict_and_wait(forever).await.unwrap();
    8071              :         }
    8072            1 :         drop(guard);
    8073              : 
    8074            1 :         timeline
    8075            1 :             .previous_heatmap
    8076            1 :             .store(Some(Arc::new(PreviousHeatmap::Active {
    8077            1 :                 heatmap: heatmap.clone(),
    8078            1 :                 read_at: std::time::Instant::now(),
    8079            1 :                 end_lsn: None,
    8080            1 :             })));
    8081              : 
    8082              :         // Generate a new heatmap and assert that it contains the same layers as the old one.
    8083            1 :         let post_migration_heatmap = timeline.generate_heatmap().await.unwrap();
    8084            1 :         assert_heatmaps_have_same_layers(&heatmap, &post_migration_heatmap);
    8085              : 
    8086              :         // Download each layer one by one. Generate the heatmap at each step and check
    8087              :         // that it's stable.
    8088            6 :         for layer in all_layers {
    8089            5 :             if layer.visibility() == LayerVisibilityHint::Covered {
    8090            1 :                 continue;
    8091            4 :             }
    8092            1 : 
    8093            4 :             eprintln!("Downloading {layer} and re-generating heatmap");
    8094            1 : 
    8095            4 :             let ctx = &RequestContextBuilder::from(ctx)
    8096            4 :                 .download_behavior(crate::context::DownloadBehavior::Download)
    8097            4 :                 .attached_child();
    8098            1 : 
    8099            4 :             let _resident = layer
    8100            4 :                 .download_and_keep_resident(ctx)
    8101            4 :                 .instrument(tracing::info_span!(
    8102            4 :                     parent: None,
    8103            1 :                     "download_layer",
    8104            1 :                     tenant_id = %timeline.tenant_shard_id.tenant_id,
    8105            1 :                     shard_id = %timeline.tenant_shard_id.shard_slug(),
    8106            1 :                     timeline_id = %timeline.timeline_id
    8107            1 :                 ))
    8108            4 :                 .await
    8109            4 :                 .unwrap();
    8110            1 : 
    8111            4 :             let post_download_heatmap = timeline.generate_heatmap().await.unwrap();
    8112            4 :             assert_heatmaps_have_same_layers(&heatmap, &post_download_heatmap);
    8113            1 :         }
    8114            1 : 
    8115            1 :         // Everything from the post-migration heatmap is now resident.
    8116            1 :         // Check that we drop it from memory.
    8117            1 :         assert!(matches!(
    8118            1 :             timeline.previous_heatmap.load().as_deref(),
    8119            1 :             Some(PreviousHeatmap::Obsolete)
    8120            1 :         ));
    8121            1 :     }
    8122              : 
    8123              :     #[tokio::test]
    8124            1 :     async fn test_previous_heatmap_obsoletion() {
    8125            1 :         let harness = TenantHarness::create("heatmap_previous_heatmap_obsoletion")
    8126            1 :             .await
    8127            1 :             .unwrap();
    8128              : 
    8129            1 :         let l0_delta = DeltaLayerTestDesc::new(
    8130            1 :             Lsn(0x20)..Lsn(0x30),
    8131            1 :             Key::from_hex("000000000000000000000000000000000000").unwrap()
    8132            1 :                 ..Key::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(),
    8133            1 :             vec![(
    8134            1 :                 Key::from_hex("720000000033333333444444445500000000").unwrap(),
    8135            1 :                 Lsn(0x25),
    8136            1 :                 Value::Image(test_img("foo")),
    8137            1 :             )],
    8138              :         );
    8139              : 
    8140            1 :         let image_layer = (
    8141            1 :             Lsn(0x40),
    8142            1 :             vec![(
    8143            1 :                 Key::from_hex("620000000033333333444444445500000000").unwrap(),
    8144            1 :                 test_img("bar"),
    8145            1 :             )],
    8146            1 :         );
    8147              : 
    8148            1 :         let delta_layers = vec![l0_delta];
    8149            1 :         let image_layers = vec![image_layer];
    8150              : 
    8151            1 :         let (tenant, ctx) = harness.load().await;
    8152            1 :         let timeline = tenant
    8153            1 :             .create_test_timeline_with_layers(
    8154            1 :                 TimelineId::generate(),
    8155            1 :                 Lsn(0x10),
    8156            1 :                 PgMajorVersion::PG14,
    8157            1 :                 &ctx,
    8158            1 :                 Vec::new(), // in-memory layers
    8159            1 :                 delta_layers,
    8160            1 :                 image_layers,
    8161            1 :                 Lsn(0x100),
    8162            1 :             )
    8163            1 :             .await
    8164            1 :             .unwrap();
    8165              : 
    8166              :         // Layer visibility is an input to heatmap generation, so refresh it first
    8167            1 :         timeline.update_layer_visibility().await.unwrap();
    8168              : 
    8169            1 :         let heatmap = timeline
    8170            1 :             .generate_heatmap()
    8171            1 :             .await
    8172            1 :             .expect("Infallible while timeline is not shut down");
    8173              : 
    8174              :         // Both layers should be in the heatmap
    8175            1 :         assert!(heatmap.all_layers().count() > 0);
    8176              : 
    8177              :         // Now simulate a migration.
    8178            1 :         timeline
    8179            1 :             .previous_heatmap
    8180            1 :             .store(Some(Arc::new(PreviousHeatmap::Active {
    8181            1 :                 heatmap: heatmap.clone(),
    8182            1 :                 read_at: std::time::Instant::now(),
    8183            1 :                 end_lsn: None,
    8184            1 :             })));
    8185              : 
    8186              :         // Evict all the layers in the previous heatmap
    8187            1 :         let guard = timeline.layers.read(LayerManagerLockHolder::Testing).await;
    8188            1 :         let forever = std::time::Duration::from_secs(120);
    8189            3 :         for layer in guard.likely_resident_layers() {
    8190            3 :             layer.evict_and_wait(forever).await.unwrap();
    8191              :         }
    8192            1 :         drop(guard);
    8193              : 
    8194              :         // Generate a new heatmap and check that the previous heatmap
    8195              :         // has been marked obsolete.
    8196            1 :         let post_eviction_heatmap = timeline
    8197            1 :             .generate_heatmap()
    8198            1 :             .await
    8199            1 :             .expect("Infallible while timeline is not shut down");
    8200              : 
    8201            1 :         assert_eq!(post_eviction_heatmap.all_layers().count(), 0);
    8202            1 :         assert!(matches!(
    8203            1 :             timeline.previous_heatmap.load().as_deref(),
    8204            1 :             Some(PreviousHeatmap::Obsolete)
    8205            1 :         ));
    8206            1 :     }
    8207              : 
    8208              :     #[tokio::test]
    8209            1 :     async fn two_layer_eviction_attempts_at_the_same_time() {
    8210            1 :         let harness = TenantHarness::create("two_layer_eviction_attempts_at_the_same_time")
    8211            1 :             .await
    8212            1 :             .unwrap();
    8213              : 
    8214            1 :         let (tenant, ctx) = harness.load().await;
    8215            1 :         let timeline = tenant
    8216            1 :             .create_test_timeline(
    8217            1 :                 TimelineId::generate(),
    8218            1 :                 Lsn(0x10),
    8219            1 :                 PgMajorVersion::PG14,
    8220            1 :                 &ctx,
    8221            1 :             )
    8222            1 :             .await
    8223            1 :             .unwrap();
    8224              : 
    8225            1 :         let layer = find_some_layer(&timeline).await;
    8226            1 :         let layer = layer
    8227            1 :             .keep_resident()
    8228            1 :             .await
    8229            1 :             .expect("no download => no downloading errors")
    8230            1 :             .drop_eviction_guard();
    8231              : 
    8232            1 :         let forever = std::time::Duration::from_secs(120);
    8233              : 
    8234            1 :         let first = layer.evict_and_wait(forever);
    8235            1 :         let second = layer.evict_and_wait(forever);
    8236              : 
    8237            1 :         let (first, second) = tokio::join!(first, second);
    8238              : 
    8239            1 :         let res = layer.keep_resident().await;
    8240            1 :         assert!(res.is_none(), "{res:?}");
    8241              : 
    8242            1 :         match (first, second) {
    8243            1 :             (Ok(()), Ok(())) => {
    8244            1 :                 // because there are no more timeline locks being taken on eviction path, we can
    8245            1 :                 // witness all three outcomes here.
    8246            1 :             }
    8247            1 :             (Ok(()), Err(EvictionError::NotFound)) | (Err(EvictionError::NotFound), Ok(())) => {
    8248            0 :                 // if one completes before the other, this is fine just as well.
    8249            0 :             }
    8250            1 :             other => unreachable!("unexpected {:?}", other),
    8251            1 :         }
    8252            1 :     }
    8253              : 
    8254            1 :     async fn find_some_layer(timeline: &Timeline) -> Layer {
    8255            1 :         let layers = timeline
    8256            1 :             .layers
    8257            1 :             .read(LayerManagerLockHolder::GetLayerMapInfo)
    8258            1 :             .await;
    8259            1 :         let desc = layers
    8260            1 :             .layer_map()
    8261            1 :             .unwrap()
    8262            1 :             .iter_historic_layers()
    8263            1 :             .next()
    8264            1 :             .expect("must find one layer to evict");
    8265              : 
    8266            1 :         layers.get_from_desc(&desc)
    8267            1 :     }
    8268              : }
        

Generated by: LCOV version 2.1-beta