LCOV - code coverage report
Current view: top level - pageserver/src/tenant - timeline.rs (source / functions) Coverage Total Hit
Test: 1c072cf775425a8f441e533586eea4edd880d500.info Lines: 63.1 % 3995 2522
Test Date: 2025-02-14 11:35:56 Functions: 58.9 % 341 201

            Line data    Source code
       1              : pub(crate) mod analysis;
       2              : pub(crate) mod compaction;
       3              : pub mod delete;
       4              : pub(crate) mod detach_ancestor;
       5              : mod eviction_task;
       6              : pub(crate) mod handle;
       7              : pub(crate) mod import_pgdata;
       8              : mod init;
       9              : pub mod layer_manager;
      10              : pub(crate) mod logical_size;
      11              : pub mod offload;
      12              : pub mod span;
      13              : pub mod uninit;
      14              : mod walreceiver;
      15              : 
      16              : use anyhow::{anyhow, bail, ensure, Context, Result};
      17              : use arc_swap::{ArcSwap, ArcSwapOption};
      18              : use bytes::Bytes;
      19              : use camino::Utf8Path;
      20              : use chrono::{DateTime, Utc};
      21              : use compaction::CompactionOutcome;
      22              : use enumset::EnumSet;
      23              : use fail::fail_point;
      24              : use futures::{stream::FuturesUnordered, StreamExt};
      25              : use handle::ShardTimelineId;
      26              : use layer_manager::Shutdown;
      27              : use offload::OffloadError;
      28              : use once_cell::sync::Lazy;
      29              : use pageserver_api::models::PageTraceEvent;
      30              : use pageserver_api::{
      31              :     key::{
      32              :         KEY_SIZE, METADATA_KEY_BEGIN_PREFIX, METADATA_KEY_END_PREFIX, NON_INHERITED_RANGE,
      33              :         SPARSE_RANGE,
      34              :     },
      35              :     keyspace::{KeySpaceAccum, KeySpaceRandomAccum, SparseKeyPartitioning},
      36              :     models::{
      37              :         CompactKeyRange, CompactLsnRange, CompactionAlgorithm, CompactionAlgorithmSettings,
      38              :         DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskSpawnRequest, EvictionPolicy,
      39              :         InMemoryLayerInfo, LayerMapInfo, LsnLease, TimelineState,
      40              :     },
      41              :     reltag::BlockNumber,
      42              :     shard::{ShardIdentity, ShardNumber, TenantShardId},
      43              : };
      44              : use rand::Rng;
      45              : use remote_storage::DownloadError;
      46              : use serde_with::serde_as;
      47              : use storage_broker::BrokerClientChannel;
      48              : use tokio::runtime::Handle;
      49              : use tokio::sync::mpsc::Sender;
      50              : use tokio::sync::{oneshot, watch, Notify};
      51              : use tokio_util::sync::CancellationToken;
      52              : use tracing::*;
      53              : use utils::critical;
      54              : use utils::rate_limit::RateLimit;
      55              : use utils::{
      56              :     fs_ext,
      57              :     guard_arc_swap::GuardArcSwap,
      58              :     pausable_failpoint,
      59              :     postgres_client::PostgresClientProtocol,
      60              :     sync::gate::{Gate, GateGuard},
      61              : };
      62              : use wal_decoder::serialized_batch::{SerializedValueBatch, ValueMeta};
      63              : 
      64              : use std::array;
      65              : use std::cmp::{max, min};
      66              : use std::collections::btree_map::Entry;
      67              : use std::collections::{BTreeMap, HashMap, HashSet};
      68              : use std::ops::{ControlFlow, Deref, Range};
      69              : use std::sync::atomic::{AtomicBool, AtomicU64, Ordering as AtomicOrdering};
      70              : use std::sync::{Arc, Mutex, OnceLock, RwLock, Weak};
      71              : use std::time::{Duration, Instant, SystemTime};
      72              : 
      73              : use crate::l0_flush::{self, L0FlushGlobalState};
      74              : use crate::tenant::storage_layer::ImageLayerName;
      75              : use crate::{
      76              :     aux_file::AuxFileSizeEstimator,
      77              :     page_service::TenantManagerTypes,
      78              :     tenant::{
      79              :         config::AttachmentMode,
      80              :         layer_map::{LayerMap, SearchResult},
      81              :         metadata::TimelineMetadata,
      82              :         storage_layer::{
      83              :             inmemory_layer::IndexEntry, BatchLayerWriter, IoConcurrency, PersistentLayerDesc,
      84              :             ValueReconstructSituation,
      85              :         },
      86              :     },
      87              :     walingest::WalLagCooldown,
      88              :     walredo,
      89              : };
      90              : use crate::{
      91              :     context::{DownloadBehavior, RequestContext},
      92              :     disk_usage_eviction_task::DiskUsageEvictionInfo,
      93              :     pgdatadir_mapping::CollectKeySpaceError,
      94              : };
      95              : use crate::{
      96              :     disk_usage_eviction_task::finite_f32,
      97              :     tenant::storage_layer::{
      98              :         AsLayerDesc, DeltaLayerWriter, EvictionError, ImageLayerWriter, InMemoryLayer, Layer,
      99              :         LayerAccessStatsReset, LayerName, ResidentLayer, ValueReconstructState,
     100              :         ValuesReconstructState,
     101              :     },
     102              : };
     103              : use crate::{
     104              :     disk_usage_eviction_task::EvictionCandidate, tenant::storage_layer::delta_layer::DeltaEntry,
     105              : };
     106              : use crate::{
     107              :     metrics::ScanLatencyOngoingRecording, tenant::timeline::logical_size::CurrentLogicalSize,
     108              : };
     109              : use crate::{
     110              :     pgdatadir_mapping::DirectoryKind,
     111              :     virtual_file::{MaybeFatalIo, VirtualFile},
     112              : };
     113              : use crate::{pgdatadir_mapping::LsnForTimestamp, tenant::tasks::BackgroundLoopKind};
     114              : use crate::{pgdatadir_mapping::MAX_AUX_FILE_V2_DELTAS, tenant::storage_layer::PersistentLayerKey};
     115              : use pageserver_api::config::tenant_conf_defaults::DEFAULT_PITR_INTERVAL;
     116              : 
     117              : use crate::config::PageServerConf;
     118              : use crate::keyspace::{KeyPartitioning, KeySpace};
     119              : use crate::metrics::{TimelineMetrics, DELTAS_PER_READ_GLOBAL, LAYERS_PER_READ_GLOBAL};
     120              : use crate::pgdatadir_mapping::CalculateLogicalSizeError;
     121              : use crate::tenant::config::TenantConfOpt;
     122              : use pageserver_api::reltag::RelTag;
     123              : use pageserver_api::shard::ShardIndex;
     124              : 
     125              : use postgres_connection::PgConnectionConfig;
     126              : use postgres_ffi::{to_pg_timestamp, v14::xlog_utils, WAL_SEGMENT_SIZE};
     127              : use utils::{
     128              :     completion,
     129              :     generation::Generation,
     130              :     id::TimelineId,
     131              :     lsn::{AtomicLsn, Lsn, RecordLsn},
     132              :     seqwait::SeqWait,
     133              :     simple_rcu::{Rcu, RcuReadGuard},
     134              : };
     135              : 
     136              : use crate::task_mgr;
     137              : use crate::task_mgr::TaskKind;
     138              : use crate::tenant::gc_result::GcResult;
     139              : use crate::ZERO_PAGE;
     140              : use pageserver_api::key::Key;
     141              : 
     142              : use self::delete::DeleteTimelineFlow;
     143              : pub(super) use self::eviction_task::EvictionTaskTenantState;
     144              : use self::eviction_task::EvictionTaskTimelineState;
     145              : use self::layer_manager::LayerManager;
     146              : use self::logical_size::LogicalSize;
     147              : use self::walreceiver::{WalReceiver, WalReceiverConf};
     148              : 
     149              : use super::{
     150              :     config::TenantConf, storage_layer::LayerVisibilityHint, upload_queue::NotInitialized,
     151              :     MaybeOffloaded,
     152              : };
     153              : use super::{
     154              :     debug_assert_current_span_has_tenant_and_timeline_id, AttachedTenantConf, HeatMapTimeline,
     155              : };
     156              : use super::{remote_timeline_client::index::IndexPart, storage_layer::LayerFringe};
     157              : use super::{
     158              :     remote_timeline_client::RemoteTimelineClient, remote_timeline_client::WaitCompletionError,
     159              :     storage_layer::ReadableLayer,
     160              : };
     161              : use super::{secondary::heatmap::HeatMapLayer, GcError};
     162              : 
     163              : #[cfg(test)]
     164              : use pageserver_api::value::Value;
     165              : 
     166              : #[derive(Debug, PartialEq, Eq, Clone, Copy)]
     167              : pub(crate) enum FlushLoopState {
     168              :     NotStarted,
     169              :     Running {
     170              :         #[cfg(test)]
     171              :         expect_initdb_optimization: bool,
     172              :         #[cfg(test)]
     173              :         initdb_optimization_count: usize,
     174              :     },
     175              :     Exited,
     176              : }
     177              : 
     178              : #[derive(Debug, Copy, Clone, PartialEq, Eq)]
     179              : pub enum ImageLayerCreationMode {
     180              :     /// Try to create image layers based on `time_for_new_image_layer`. Used in compaction code path.
     181              :     Try,
     182              :     /// Force creating the image layers if possible. For now, no image layers will be created
     183              :     /// for metadata keys. Used in compaction code path with force flag enabled.
     184              :     Force,
     185              :     /// Initial ingestion of the data, and no data should be dropped in this function. This
     186              :     /// means that no metadata keys should be included in the partitions. Used in flush frozen layer
     187              :     /// code path.
     188              :     Initial,
     189              : }
     190              : 
     191              : #[derive(Clone, Debug, Default)]
     192              : pub enum LastImageLayerCreationStatus {
     193              :     Incomplete {
     194              :         /// The last key of the partition (exclusive) that was processed in the last
     195              :         /// image layer creation attempt. We will continue from this key in the next
     196              :         /// attempt.
     197              :         last_key: Key,
     198              :     },
     199              :     Complete,
     200              :     #[default]
     201              :     Initial,
     202              : }
     203              : 
     204              : impl std::fmt::Display for ImageLayerCreationMode {
     205         1181 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
     206         1181 :         write!(f, "{:?}", self)
     207         1181 :     }
     208              : }
     209              : 
     210              : /// Temporary function for immutable storage state refactor, ensures we are dropping mutex guard instead of other things.
     211              : /// Can be removed after all refactors are done.
     212           92 : fn drop_rlock<T>(rlock: tokio::sync::RwLockReadGuard<T>) {
     213           92 :     drop(rlock)
     214           92 : }
     215              : 
     216              : /// Temporary function for immutable storage state refactor, ensures we are dropping mutex guard instead of other things.
     217              : /// Can be removed after all refactors are done.
     218         1273 : fn drop_wlock<T>(rlock: tokio::sync::RwLockWriteGuard<'_, T>) {
     219         1273 :     drop(rlock)
     220         1273 : }
     221              : 
     222              : /// The outward-facing resources required to build a Timeline
     223              : pub struct TimelineResources {
     224              :     pub remote_client: RemoteTimelineClient,
     225              :     pub pagestream_throttle: Arc<crate::tenant::throttle::Throttle>,
     226              :     pub pagestream_throttle_metrics: Arc<crate::metrics::tenant_throttling::Pagestream>,
     227              :     pub l0_compaction_trigger: Arc<Notify>,
     228              :     pub l0_flush_global_state: l0_flush::L0FlushGlobalState,
     229              : }
     230              : 
     231              : /// The relation size cache caches relation sizes at the end of the timeline. It speeds up WAL
     232              : /// ingestion considerably, because WAL ingestion needs to check on most records if the record
     233              : /// implicitly extends the relation.  At startup, `complete_as_of` is initialized to the current end
     234              : /// of the timeline (disk_consistent_lsn).  It's used on reads of relation sizes to check if the
     235              : /// value can be used to also update the cache, see [`Timeline::update_cached_rel_size`].
     236              : pub(crate) struct RelSizeCache {
     237              :     pub(crate) complete_as_of: Lsn,
     238              :     pub(crate) map: HashMap<RelTag, (Lsn, BlockNumber)>,
     239              : }
     240              : 
     241              : pub struct Timeline {
     242              :     pub(crate) conf: &'static PageServerConf,
     243              :     tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
     244              : 
     245              :     myself: Weak<Self>,
     246              : 
     247              :     pub(crate) tenant_shard_id: TenantShardId,
     248              :     pub timeline_id: TimelineId,
     249              : 
     250              :     /// The generation of the tenant that instantiated us: this is used for safety when writing remote objects.
     251              :     /// Never changes for the lifetime of this [`Timeline`] object.
     252              :     ///
     253              :     /// This duplicates the generation stored in LocationConf, but that structure is mutable:
     254              :     /// this copy enforces the invariant that generatio doesn't change during a Tenant's lifetime.
     255              :     pub(crate) generation: Generation,
     256              : 
     257              :     /// The detailed sharding information from our parent Tenant.  This enables us to map keys
     258              :     /// to shards, and is constant through the lifetime of this Timeline.
     259              :     shard_identity: ShardIdentity,
     260              : 
     261              :     pub pg_version: u32,
     262              : 
     263              :     /// The tuple has two elements.
     264              :     /// 1. `LayerFileManager` keeps track of the various physical representations of the layer files (inmem, local, remote).
     265              :     /// 2. `LayerMap`, the acceleration data structure for `get_reconstruct_data`.
     266              :     ///
     267              :     /// `LayerMap` maps out the `(PAGE,LSN) / (KEY,LSN)` space, which is composed of `(KeyRange, LsnRange)` rectangles.
     268              :     /// We describe these rectangles through the `PersistentLayerDesc` struct.
     269              :     ///
     270              :     /// When we want to reconstruct a page, we first find the `PersistentLayerDesc`'s that we need for page reconstruction,
     271              :     /// using `LayerMap`. Then, we use `LayerFileManager` to get the `PersistentLayer`'s that correspond to the
     272              :     /// `PersistentLayerDesc`'s.
     273              :     ///
     274              :     /// Hence, it's important to keep things coherent. The `LayerFileManager` must always have an entry for all
     275              :     /// `PersistentLayerDesc`'s in the `LayerMap`. If it doesn't, `LayerFileManager::get_from_desc` will panic at
     276              :     /// runtime, e.g., during page reconstruction.
     277              :     ///
     278              :     /// In the future, we'll be able to split up the tuple of LayerMap and `LayerFileManager`,
     279              :     /// so that e.g. on-demand-download/eviction, and layer spreading, can operate just on `LayerFileManager`.
     280              :     pub(crate) layers: tokio::sync::RwLock<LayerManager>,
     281              : 
     282              :     last_freeze_at: AtomicLsn,
     283              :     // Atomic would be more appropriate here.
     284              :     last_freeze_ts: RwLock<Instant>,
     285              : 
     286              :     pub(crate) standby_horizon: AtomicLsn,
     287              : 
     288              :     // WAL redo manager. `None` only for broken tenants.
     289              :     walredo_mgr: Option<Arc<super::WalRedoManager>>,
     290              : 
     291              :     /// Remote storage client.
     292              :     /// See [`remote_timeline_client`](super::remote_timeline_client) module comment for details.
     293              :     pub(crate) remote_client: Arc<RemoteTimelineClient>,
     294              : 
     295              :     // What page versions do we hold in the repository? If we get a
     296              :     // request > last_record_lsn, we need to wait until we receive all
     297              :     // the WAL up to the request. The SeqWait provides functions for
     298              :     // that. TODO: If we get a request for an old LSN, such that the
     299              :     // versions have already been garbage collected away, we should
     300              :     // throw an error, but we don't track that currently.
     301              :     //
     302              :     // last_record_lsn.load().last points to the end of last processed WAL record.
     303              :     //
     304              :     // We also remember the starting point of the previous record in
     305              :     // 'last_record_lsn.load().prev'. It's used to set the xl_prev pointer of the
     306              :     // first WAL record when the node is started up. But here, we just
     307              :     // keep track of it.
     308              :     last_record_lsn: SeqWait<RecordLsn, Lsn>,
     309              : 
     310              :     // All WAL records have been processed and stored durably on files on
     311              :     // local disk, up to this LSN. On crash and restart, we need to re-process
     312              :     // the WAL starting from this point.
     313              :     //
     314              :     // Some later WAL records might have been processed and also flushed to disk
     315              :     // already, so don't be surprised to see some, but there's no guarantee on
     316              :     // them yet.
     317              :     disk_consistent_lsn: AtomicLsn,
     318              : 
     319              :     // Parent timeline that this timeline was branched from, and the LSN
     320              :     // of the branch point.
     321              :     ancestor_timeline: Option<Arc<Timeline>>,
     322              :     ancestor_lsn: Lsn,
     323              : 
     324              :     pub(super) metrics: TimelineMetrics,
     325              : 
     326              :     // `Timeline` doesn't write these metrics itself, but it manages the lifetime.  Code
     327              :     // in `crate::page_service` writes these metrics.
     328              :     pub(crate) query_metrics: crate::metrics::SmgrQueryTimePerTimeline,
     329              : 
     330              :     directory_metrics: [AtomicU64; DirectoryKind::KINDS_NUM],
     331              : 
     332              :     /// Ensures layers aren't frozen by checkpointer between
     333              :     /// [`Timeline::get_layer_for_write`] and layer reads.
     334              :     /// Locked automatically by [`TimelineWriter`] and checkpointer.
     335              :     /// Must always be acquired before the layer map/individual layer lock
     336              :     /// to avoid deadlock.
     337              :     ///
     338              :     /// The state is cleared upon freezing.
     339              :     write_lock: tokio::sync::Mutex<Option<TimelineWriterState>>,
     340              : 
     341              :     /// Used to avoid multiple `flush_loop` tasks running
     342              :     pub(super) flush_loop_state: Mutex<FlushLoopState>,
     343              : 
     344              :     /// layer_flush_start_tx can be used to wake up the layer-flushing task.
     345              :     /// - The u64 value is a counter, incremented every time a new flush cycle is requested.
     346              :     ///   The flush cycle counter is sent back on the layer_flush_done channel when
     347              :     ///   the flush finishes. You can use that to wait for the flush to finish.
     348              :     /// - The LSN is updated to max() of its current value and the latest disk_consistent_lsn
     349              :     ///   read by whoever sends an update
     350              :     layer_flush_start_tx: tokio::sync::watch::Sender<(u64, Lsn)>,
     351              :     /// to be notified when layer flushing has finished, subscribe to the layer_flush_done channel
     352              :     layer_flush_done_tx: tokio::sync::watch::Sender<(u64, Result<(), FlushLayerError>)>,
     353              : 
     354              :     // The LSN at which we have executed GC: whereas [`Self::gc_info`] records the LSN at which
     355              :     // we _intend_ to GC (i.e. the PITR cutoff), this LSN records where we actually last did it.
     356              :     // Because PITR interval is mutable, it's possible for this LSN to be earlier or later than
     357              :     // the planned GC cutoff.
     358              :     pub applied_gc_cutoff_lsn: Rcu<Lsn>,
     359              : 
     360              :     pub(crate) gc_compaction_layer_update_lock: tokio::sync::RwLock<()>,
     361              : 
     362              :     // List of child timelines and their branch points. This is needed to avoid
     363              :     // garbage collecting data that is still needed by the child timelines.
     364              :     pub(crate) gc_info: std::sync::RwLock<GcInfo>,
     365              : 
     366              :     pub(crate) last_image_layer_creation_status: ArcSwap<LastImageLayerCreationStatus>,
     367              : 
     368              :     // It may change across major versions so for simplicity
     369              :     // keep it after running initdb for a timeline.
     370              :     // It is needed in checks when we want to error on some operations
     371              :     // when they are requested for pre-initdb lsn.
     372              :     // It can be unified with latest_gc_cutoff_lsn under some "first_valid_lsn",
     373              :     // though let's keep them both for better error visibility.
     374              :     pub initdb_lsn: Lsn,
     375              : 
     376              :     /// The repartitioning result. Allows a single writer and multiple readers.
     377              :     pub(crate) partitioning: GuardArcSwap<((KeyPartitioning, SparseKeyPartitioning), Lsn)>,
     378              : 
     379              :     /// Configuration: how often should the partitioning be recalculated.
     380              :     repartition_threshold: u64,
     381              : 
     382              :     last_image_layer_creation_check_at: AtomicLsn,
     383              :     last_image_layer_creation_check_instant: std::sync::Mutex<Option<Instant>>,
     384              : 
     385              :     /// Current logical size of the "datadir", at the last LSN.
     386              :     current_logical_size: LogicalSize,
     387              : 
     388              :     /// Information about the last processed message by the WAL receiver,
     389              :     /// or None if WAL receiver has not received anything for this timeline
     390              :     /// yet.
     391              :     pub last_received_wal: Mutex<Option<WalReceiverInfo>>,
     392              :     pub walreceiver: Mutex<Option<WalReceiver>>,
     393              : 
     394              :     /// Relation size cache
     395              :     pub(crate) rel_size_cache: RwLock<RelSizeCache>,
     396              : 
     397              :     download_all_remote_layers_task_info: RwLock<Option<DownloadRemoteLayersTaskInfo>>,
     398              : 
     399              :     state: watch::Sender<TimelineState>,
     400              : 
     401              :     /// Prevent two tasks from deleting the timeline at the same time. If held, the
     402              :     /// timeline is being deleted. If 'true', the timeline has already been deleted.
     403              :     pub delete_progress: TimelineDeleteProgress,
     404              : 
     405              :     eviction_task_timeline_state: tokio::sync::Mutex<EvictionTaskTimelineState>,
     406              : 
     407              :     /// Load or creation time information about the disk_consistent_lsn and when the loading
     408              :     /// happened. Used for consumption metrics.
     409              :     pub(crate) loaded_at: (Lsn, SystemTime),
     410              : 
     411              :     /// Gate to prevent shutdown completing while I/O is still happening to this timeline's data
     412              :     pub(crate) gate: Gate,
     413              : 
     414              :     /// Cancellation token scoped to this timeline: anything doing long-running work relating
     415              :     /// to the timeline should drop out when this token fires.
     416              :     pub(crate) cancel: CancellationToken,
     417              : 
     418              :     /// Make sure we only have one running compaction at a time in tests.
     419              :     ///
     420              :     /// Must only be taken in two places:
     421              :     /// - [`Timeline::compact`] (this file)
     422              :     /// - [`delete::delete_local_timeline_directory`]
     423              :     ///
     424              :     /// Timeline deletion will acquire both compaction and gc locks in whatever order.
     425              :     compaction_lock: tokio::sync::Mutex<()>,
     426              : 
     427              :     /// If true, the last compaction failed.
     428              :     compaction_failed: AtomicBool,
     429              : 
     430              :     /// Notifies the tenant compaction loop that there is pending L0 compaction work.
     431              :     l0_compaction_trigger: Arc<Notify>,
     432              : 
     433              :     /// Make sure we only have one running gc at a time.
     434              :     ///
     435              :     /// Must only be taken in two places:
     436              :     /// - [`Timeline::gc`] (this file)
     437              :     /// - [`delete::delete_local_timeline_directory`]
     438              :     ///
     439              :     /// Timeline deletion will acquire both compaction and gc locks in whatever order.
     440              :     gc_lock: tokio::sync::Mutex<()>,
     441              : 
     442              :     /// Cloned from [`super::Tenant::pagestream_throttle`] on construction.
     443              :     pub(crate) pagestream_throttle: Arc<crate::tenant::throttle::Throttle>,
     444              : 
     445              :     /// Size estimator for aux file v2
     446              :     pub(crate) aux_file_size_estimator: AuxFileSizeEstimator,
     447              : 
     448              :     /// Some test cases directly place keys into the timeline without actually modifying the directory
     449              :     /// keys (i.e., DB_DIR). The test cases creating such keys will put the keyspaces here, so that
     450              :     /// these keys won't get garbage-collected during compaction/GC. This field only modifies the dense
     451              :     /// keyspace return value of `collect_keyspace`. For sparse keyspaces, use AUX keys for testing, and
     452              :     /// in the future, add `extra_test_sparse_keyspace` if necessary.
     453              :     #[cfg(test)]
     454              :     pub(crate) extra_test_dense_keyspace: ArcSwap<KeySpace>,
     455              : 
     456              :     pub(crate) l0_flush_global_state: L0FlushGlobalState,
     457              : 
     458              :     pub(crate) handles: handle::PerTimelineState<TenantManagerTypes>,
     459              : 
     460              :     pub(crate) attach_wal_lag_cooldown: Arc<OnceLock<WalLagCooldown>>,
     461              : 
     462              :     /// Cf. [`crate::tenant::CreateTimelineIdempotency`].
     463              :     pub(crate) create_idempotency: crate::tenant::CreateTimelineIdempotency,
     464              : 
     465              :     /// If Some, collects GetPage metadata for an ongoing PageTrace.
     466              :     pub(crate) page_trace: ArcSwapOption<Sender<PageTraceEvent>>,
     467              : 
     468              :     previous_heatmap: ArcSwapOption<PreviousHeatmap>,
     469              : }
     470              : 
     471              : pub(crate) enum PreviousHeatmap {
     472              :     Active {
     473              :         heatmap: HeatMapTimeline,
     474              :         read_at: std::time::Instant,
     475              :     },
     476              :     Obsolete,
     477              : }
     478              : 
     479              : pub type TimelineDeleteProgress = Arc<tokio::sync::Mutex<DeleteTimelineFlow>>;
     480              : 
     481              : pub struct WalReceiverInfo {
     482              :     pub wal_source_connconf: PgConnectionConfig,
     483              :     pub last_received_msg_lsn: Lsn,
     484              :     pub last_received_msg_ts: u128,
     485              : }
     486              : 
     487              : /// Information about how much history needs to be retained, needed by
     488              : /// Garbage Collection.
     489              : #[derive(Default)]
     490              : pub(crate) struct GcInfo {
     491              :     /// Specific LSNs that are needed.
     492              :     ///
     493              :     /// Currently, this includes all points where child branches have
     494              :     /// been forked off from. In the future, could also include
     495              :     /// explicit user-defined snapshot points.
     496              :     pub(crate) retain_lsns: Vec<(Lsn, TimelineId, MaybeOffloaded)>,
     497              : 
     498              :     /// The cutoff coordinates, which are combined by selecting the minimum.
     499              :     pub(crate) cutoffs: GcCutoffs,
     500              : 
     501              :     /// Leases granted to particular LSNs.
     502              :     pub(crate) leases: BTreeMap<Lsn, LsnLease>,
     503              : 
     504              :     /// Whether our branch point is within our ancestor's PITR interval (for cost estimation)
     505              :     pub(crate) within_ancestor_pitr: bool,
     506              : }
     507              : 
     508              : impl GcInfo {
     509          600 :     pub(crate) fn min_cutoff(&self) -> Lsn {
     510          600 :         self.cutoffs.select_min()
     511          600 :     }
     512              : 
     513          464 :     pub(super) fn insert_child(
     514          464 :         &mut self,
     515          464 :         child_id: TimelineId,
     516          464 :         child_lsn: Lsn,
     517          464 :         is_offloaded: MaybeOffloaded,
     518          464 :     ) {
     519          464 :         self.retain_lsns.push((child_lsn, child_id, is_offloaded));
     520          464 :         self.retain_lsns.sort_by_key(|i| i.0);
     521          464 :     }
     522              : 
     523            8 :     pub(super) fn remove_child_maybe_offloaded(
     524            8 :         &mut self,
     525            8 :         child_id: TimelineId,
     526            8 :         maybe_offloaded: MaybeOffloaded,
     527            8 :     ) -> bool {
     528            8 :         // Remove at most one element. Needed for correctness if there is two live `Timeline` objects referencing
     529            8 :         // the same timeline. Shouldn't but maybe can occur when Arc's live longer than intended.
     530            8 :         let mut removed = false;
     531           12 :         self.retain_lsns.retain(|i| {
     532           12 :             if removed {
     533            4 :                 return true;
     534            8 :             }
     535            8 :             let remove = i.1 == child_id && i.2 == maybe_offloaded;
     536            8 :             removed |= remove;
     537            8 :             !remove
     538           12 :         });
     539            8 :         removed
     540            8 :     }
     541              : 
     542            8 :     pub(super) fn remove_child_not_offloaded(&mut self, child_id: TimelineId) -> bool {
     543            8 :         self.remove_child_maybe_offloaded(child_id, MaybeOffloaded::No)
     544            8 :     }
     545              : 
     546            0 :     pub(super) fn remove_child_offloaded(&mut self, child_id: TimelineId) -> bool {
     547            0 :         self.remove_child_maybe_offloaded(child_id, MaybeOffloaded::Yes)
     548            0 :     }
     549          464 :     pub(crate) fn lsn_covered_by_lease(&self, lsn: Lsn) -> bool {
     550          464 :         self.leases.contains_key(&lsn)
     551          464 :     }
     552              : }
     553              : 
     554              : /// The `GcInfo` component describing which Lsns need to be retained.  Functionally, this
     555              : /// is a single number (the oldest LSN which we must retain), but it internally distinguishes
     556              : /// between time-based and space-based retention for observability and consumption metrics purposes.
     557              : #[derive(Debug, Clone)]
     558              : pub(crate) struct GcCutoffs {
     559              :     /// Calculated from the [`TenantConf::gc_horizon`], this LSN indicates how much
     560              :     /// history we must keep to retain a specified number of bytes of WAL.
     561              :     pub(crate) space: Lsn,
     562              : 
     563              :     /// Calculated from [`TenantConf::pitr_interval`], this LSN indicates how much
     564              :     /// history we must keep to enable reading back at least the PITR interval duration.
     565              :     pub(crate) time: Lsn,
     566              : }
     567              : 
     568              : impl Default for GcCutoffs {
     569          897 :     fn default() -> Self {
     570          897 :         Self {
     571          897 :             space: Lsn::INVALID,
     572          897 :             time: Lsn::INVALID,
     573          897 :         }
     574          897 :     }
     575              : }
     576              : 
     577              : impl GcCutoffs {
     578          600 :     fn select_min(&self) -> Lsn {
     579          600 :         std::cmp::min(self.space, self.time)
     580          600 :     }
     581              : }
     582              : 
     583              : pub(crate) struct TimelineVisitOutcome {
     584              :     completed_keyspace: KeySpace,
     585              :     image_covered_keyspace: KeySpace,
     586              : }
     587              : 
     588              : /// An error happened in a get() operation.
     589              : #[derive(thiserror::Error, Debug)]
     590              : pub(crate) enum PageReconstructError {
     591              :     #[error(transparent)]
     592              :     Other(anyhow::Error),
     593              : 
     594              :     #[error("Ancestor LSN wait error: {0}")]
     595              :     AncestorLsnTimeout(WaitLsnError),
     596              : 
     597              :     #[error("timeline shutting down")]
     598              :     Cancelled,
     599              : 
     600              :     /// An error happened replaying WAL records
     601              :     #[error(transparent)]
     602              :     WalRedo(anyhow::Error),
     603              : 
     604              :     #[error("{0}")]
     605              :     MissingKey(MissingKeyError),
     606              : }
     607              : 
     608              : impl From<anyhow::Error> for PageReconstructError {
     609            0 :     fn from(value: anyhow::Error) -> Self {
     610            0 :         // with walingest.rs many PageReconstructError are wrapped in as anyhow::Error
     611            0 :         match value.downcast::<PageReconstructError>() {
     612            0 :             Ok(pre) => pre,
     613            0 :             Err(other) => PageReconstructError::Other(other),
     614              :         }
     615            0 :     }
     616              : }
     617              : 
     618              : impl From<utils::bin_ser::DeserializeError> for PageReconstructError {
     619            0 :     fn from(value: utils::bin_ser::DeserializeError) -> Self {
     620            0 :         PageReconstructError::Other(anyhow::Error::new(value).context("deserialization failure"))
     621            0 :     }
     622              : }
     623              : 
     624              : impl From<layer_manager::Shutdown> for PageReconstructError {
     625            0 :     fn from(_: layer_manager::Shutdown) -> Self {
     626            0 :         PageReconstructError::Cancelled
     627            0 :     }
     628              : }
     629              : 
     630              : impl GetVectoredError {
     631              :     #[cfg(test)]
     632           12 :     pub(crate) fn is_missing_key_error(&self) -> bool {
     633           12 :         matches!(self, Self::MissingKey(_))
     634           12 :     }
     635              : }
     636              : 
     637              : impl From<layer_manager::Shutdown> for GetVectoredError {
     638            0 :     fn from(_: layer_manager::Shutdown) -> Self {
     639            0 :         GetVectoredError::Cancelled
     640            0 :     }
     641              : }
     642              : 
     643              : /// A layer identifier when used in the [`ReadPath`] structure. This enum is for observability purposes
     644              : /// only and not used by the "real read path".
     645              : pub enum ReadPathLayerId {
     646              :     PersistentLayer(PersistentLayerKey),
     647              :     InMemoryLayer(Range<Lsn>),
     648              : }
     649              : 
     650              : impl std::fmt::Display for ReadPathLayerId {
     651            0 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
     652            0 :         match self {
     653            0 :             ReadPathLayerId::PersistentLayer(key) => write!(f, "{}", key),
     654            0 :             ReadPathLayerId::InMemoryLayer(range) => {
     655            0 :                 write!(f, "in-mem {}..{}", range.start, range.end)
     656              :             }
     657              :         }
     658            0 :     }
     659              : }
     660              : pub struct ReadPath {
     661              :     keyspace: KeySpace,
     662              :     lsn: Lsn,
     663              :     path: Vec<(ReadPathLayerId, KeySpace, Range<Lsn>)>,
     664              : }
     665              : 
     666              : impl ReadPath {
     667      1255313 :     pub fn new(keyspace: KeySpace, lsn: Lsn) -> Self {
     668      1255313 :         Self {
     669      1255313 :             keyspace,
     670      1255313 :             lsn,
     671      1255313 :             path: Vec::new(),
     672      1255313 :         }
     673      1255313 :     }
     674              : 
     675      1767933 :     pub fn record_layer_visit(
     676      1767933 :         &mut self,
     677      1767933 :         layer_to_read: &ReadableLayer,
     678      1767933 :         keyspace_to_read: &KeySpace,
     679      1767933 :         lsn_range: &Range<Lsn>,
     680      1767933 :     ) {
     681      1767933 :         let id = match layer_to_read {
     682       554613 :             ReadableLayer::PersistentLayer(layer) => {
     683       554613 :                 ReadPathLayerId::PersistentLayer(layer.layer_desc().key())
     684              :             }
     685      1213320 :             ReadableLayer::InMemoryLayer(layer) => {
     686      1213320 :                 ReadPathLayerId::InMemoryLayer(layer.get_lsn_range())
     687              :             }
     688              :         };
     689      1767933 :         self.path
     690      1767933 :             .push((id, keyspace_to_read.clone(), lsn_range.clone()));
     691      1767933 :     }
     692              : }
     693              : 
     694              : impl std::fmt::Display for ReadPath {
     695            0 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
     696            0 :         writeln!(f, "Read path for {} at lsn {}:", self.keyspace, self.lsn)?;
     697            0 :         for (idx, (layer_id, keyspace, lsn_range)) in self.path.iter().enumerate() {
     698            0 :             writeln!(
     699            0 :                 f,
     700            0 :                 "{}: {} {}..{} {}",
     701            0 :                 idx, layer_id, lsn_range.start, lsn_range.end, keyspace
     702            0 :             )?;
     703              :         }
     704            0 :         Ok(())
     705            0 :     }
     706              : }
     707              : 
     708              : #[derive(thiserror::Error)]
     709              : pub struct MissingKeyError {
     710              :     key: Key,
     711              :     shard: ShardNumber,
     712              :     cont_lsn: Lsn,
     713              :     request_lsn: Lsn,
     714              :     ancestor_lsn: Option<Lsn>,
     715              :     /// Debug information about the read path if there's an error
     716              :     read_path: Option<ReadPath>,
     717              :     backtrace: Option<std::backtrace::Backtrace>,
     718              : }
     719              : 
     720              : impl std::fmt::Debug for MissingKeyError {
     721            0 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
     722            0 :         write!(f, "{}", self)
     723            0 :     }
     724              : }
     725              : 
     726              : impl std::fmt::Display for MissingKeyError {
     727            0 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
     728            0 :         write!(
     729            0 :             f,
     730            0 :             "could not find data for key {} (shard {:?}) at LSN {}, request LSN {}",
     731            0 :             self.key, self.shard, self.cont_lsn, self.request_lsn
     732            0 :         )?;
     733              : 
     734            0 :         if let Some(ref ancestor_lsn) = self.ancestor_lsn {
     735            0 :             write!(f, ", ancestor {}", ancestor_lsn)?;
     736            0 :         }
     737              : 
     738            0 :         if let Some(ref read_path) = self.read_path {
     739            0 :             write!(f, "\n{}", read_path)?;
     740            0 :         }
     741              : 
     742            0 :         if let Some(ref backtrace) = self.backtrace {
     743            0 :             write!(f, "\n{}", backtrace)?;
     744            0 :         }
     745              : 
     746            0 :         Ok(())
     747            0 :     }
     748              : }
     749              : 
     750              : impl PageReconstructError {
     751              :     /// Returns true if this error indicates a tenant/timeline shutdown alike situation
     752            0 :     pub(crate) fn is_stopping(&self) -> bool {
     753              :         use PageReconstructError::*;
     754            0 :         match self {
     755            0 :             Cancelled => true,
     756            0 :             Other(_) | AncestorLsnTimeout(_) | WalRedo(_) | MissingKey(_) => false,
     757              :         }
     758            0 :     }
     759              : }
     760              : 
     761              : #[derive(thiserror::Error, Debug)]
     762              : pub(crate) enum CreateImageLayersError {
     763              :     #[error("timeline shutting down")]
     764              :     Cancelled,
     765              : 
     766              :     #[error("read failed")]
     767              :     GetVectoredError(#[source] GetVectoredError),
     768              : 
     769              :     #[error("reconstruction failed")]
     770              :     PageReconstructError(#[source] PageReconstructError),
     771              : 
     772              :     #[error(transparent)]
     773              :     Other(#[from] anyhow::Error),
     774              : }
     775              : 
     776              : impl From<layer_manager::Shutdown> for CreateImageLayersError {
     777            0 :     fn from(_: layer_manager::Shutdown) -> Self {
     778            0 :         CreateImageLayersError::Cancelled
     779            0 :     }
     780              : }
     781              : 
     782              : #[derive(thiserror::Error, Debug, Clone)]
     783              : pub(crate) enum FlushLayerError {
     784              :     /// Timeline cancellation token was cancelled
     785              :     #[error("timeline shutting down")]
     786              :     Cancelled,
     787              : 
     788              :     /// We tried to flush a layer while the Timeline is in an unexpected state
     789              :     #[error("cannot flush frozen layers when flush_loop is not running, state is {0:?}")]
     790              :     NotRunning(FlushLoopState),
     791              : 
     792              :     // Arc<> the following non-clonable error types: we must be Clone-able because the flush error is propagated from the flush
     793              :     // loop via a watch channel, where we can only borrow it.
     794              :     #[error("create image layers (shared)")]
     795              :     CreateImageLayersError(Arc<CreateImageLayersError>),
     796              : 
     797              :     #[error("other (shared)")]
     798              :     Other(#[from] Arc<anyhow::Error>),
     799              : }
     800              : 
     801              : impl FlushLayerError {
     802              :     // When crossing from generic anyhow errors to this error type, we explicitly check
     803              :     // for timeline cancellation to avoid logging inoffensive shutdown errors as warn/err.
     804            0 :     fn from_anyhow(timeline: &Timeline, err: anyhow::Error) -> Self {
     805            0 :         let cancelled = timeline.cancel.is_cancelled()
     806              :             // The upload queue might have been shut down before the official cancellation of the timeline.
     807            0 :             || err
     808            0 :                 .downcast_ref::<NotInitialized>()
     809            0 :                 .map(NotInitialized::is_stopping)
     810            0 :                 .unwrap_or_default();
     811            0 :         if cancelled {
     812            0 :             Self::Cancelled
     813              :         } else {
     814            0 :             Self::Other(Arc::new(err))
     815              :         }
     816            0 :     }
     817              : }
     818              : 
     819              : impl From<layer_manager::Shutdown> for FlushLayerError {
     820            0 :     fn from(_: layer_manager::Shutdown) -> Self {
     821            0 :         FlushLayerError::Cancelled
     822            0 :     }
     823              : }
     824              : 
     825              : #[derive(thiserror::Error, Debug)]
     826              : pub(crate) enum GetVectoredError {
     827              :     #[error("timeline shutting down")]
     828              :     Cancelled,
     829              : 
     830              :     #[error("requested too many keys: {0} > {}", Timeline::MAX_GET_VECTORED_KEYS)]
     831              :     Oversized(u64),
     832              : 
     833              :     #[error("requested at invalid LSN: {0}")]
     834              :     InvalidLsn(Lsn),
     835              : 
     836              :     #[error("requested key not found: {0}")]
     837              :     MissingKey(MissingKeyError),
     838              : 
     839              :     #[error("ancestry walk")]
     840              :     GetReadyAncestorError(#[source] GetReadyAncestorError),
     841              : 
     842              :     #[error(transparent)]
     843              :     Other(#[from] anyhow::Error),
     844              : }
     845              : 
     846              : impl From<GetReadyAncestorError> for GetVectoredError {
     847            4 :     fn from(value: GetReadyAncestorError) -> Self {
     848              :         use GetReadyAncestorError::*;
     849            4 :         match value {
     850            0 :             Cancelled => GetVectoredError::Cancelled,
     851              :             AncestorLsnTimeout(_) | BadState { .. } => {
     852            4 :                 GetVectoredError::GetReadyAncestorError(value)
     853              :             }
     854              :         }
     855            4 :     }
     856              : }
     857              : 
     858              : #[derive(thiserror::Error, Debug)]
     859              : pub(crate) enum GetReadyAncestorError {
     860              :     #[error("ancestor LSN wait error")]
     861              :     AncestorLsnTimeout(#[from] WaitLsnError),
     862              : 
     863              :     #[error("bad state on timeline {timeline_id}: {state:?}")]
     864              :     BadState {
     865              :         timeline_id: TimelineId,
     866              :         state: TimelineState,
     867              :     },
     868              : 
     869              :     #[error("cancelled")]
     870              :     Cancelled,
     871              : }
     872              : 
     873              : #[derive(Clone, Copy)]
     874              : pub enum LogicalSizeCalculationCause {
     875              :     Initial,
     876              :     ConsumptionMetricsSyntheticSize,
     877              :     EvictionTaskImitation,
     878              :     TenantSizeHandler,
     879              : }
     880              : 
     881              : pub enum GetLogicalSizePriority {
     882              :     User,
     883              :     Background,
     884              : }
     885              : 
     886            0 : #[derive(Debug, enumset::EnumSetType)]
     887              : pub(crate) enum CompactFlags {
     888              :     ForceRepartition,
     889              :     ForceImageLayerCreation,
     890              :     ForceL0Compaction,
     891              :     OnlyL0Compaction,
     892              :     EnhancedGcBottomMostCompaction,
     893              :     DryRun,
     894              :     /// Disables compaction yielding e.g. due to high L0 count. This is set e.g. when requesting
     895              :     /// compaction via HTTP API.
     896              :     NoYield,
     897              : }
     898              : 
     899              : #[serde_with::serde_as]
     900            0 : #[derive(Debug, Clone, serde::Deserialize)]
     901              : pub(crate) struct CompactRequest {
     902              :     pub compact_key_range: Option<CompactKeyRange>,
     903              :     pub compact_lsn_range: Option<CompactLsnRange>,
     904              :     /// Whether the compaction job should be scheduled.
     905              :     #[serde(default)]
     906              :     pub scheduled: bool,
     907              :     /// Whether the compaction job should be split across key ranges.
     908              :     #[serde(default)]
     909              :     pub sub_compaction: bool,
     910              :     /// Max job size for each subcompaction job.
     911              :     pub sub_compaction_max_job_size_mb: Option<u64>,
     912              : }
     913              : 
     914              : #[derive(Debug, Clone, Default)]
     915              : pub(crate) struct CompactOptions {
     916              :     pub flags: EnumSet<CompactFlags>,
     917              :     /// If set, the compaction will only compact the key range specified by this option.
     918              :     /// This option is only used by GC compaction. For the full explanation, see [`compaction::GcCompactJob`].
     919              :     pub compact_key_range: Option<CompactKeyRange>,
     920              :     /// If set, the compaction will only compact the LSN within this value.
     921              :     /// This option is only used by GC compaction. For the full explanation, see [`compaction::GcCompactJob`].
     922              :     pub compact_lsn_range: Option<CompactLsnRange>,
     923              :     /// Enable sub-compaction (split compaction job across key ranges).
     924              :     /// This option is only used by GC compaction.
     925              :     pub sub_compaction: bool,
     926              :     /// Set job size for the GC compaction.
     927              :     /// This option is only used by GC compaction.
     928              :     pub sub_compaction_max_job_size_mb: Option<u64>,
     929              : }
     930              : 
     931              : impl std::fmt::Debug for Timeline {
     932            0 :     fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
     933            0 :         write!(f, "Timeline<{}>", self.timeline_id)
     934            0 :     }
     935              : }
     936              : 
     937              : #[derive(thiserror::Error, Debug)]
     938              : pub(crate) enum WaitLsnError {
     939              :     // Called on a timeline which is shutting down
     940              :     #[error("Shutdown")]
     941              :     Shutdown,
     942              : 
     943              :     // Called on an timeline not in active state or shutting down
     944              :     #[error("Bad timeline state: {0:?}")]
     945              :     BadState(TimelineState),
     946              : 
     947              :     // Timeout expired while waiting for LSN to catch up with goal.
     948              :     #[error("{0}")]
     949              :     Timeout(String),
     950              : }
     951              : 
     952              : // The impls below achieve cancellation mapping for errors.
     953              : // Perhaps there's a way of achieving this with less cruft.
     954              : 
     955              : impl From<CreateImageLayersError> for CompactionError {
     956            0 :     fn from(e: CreateImageLayersError) -> Self {
     957            0 :         match e {
     958            0 :             CreateImageLayersError::Cancelled => CompactionError::ShuttingDown,
     959            0 :             CreateImageLayersError::Other(e) => {
     960            0 :                 CompactionError::Other(e.context("create image layers"))
     961              :             }
     962            0 :             _ => CompactionError::Other(e.into()),
     963              :         }
     964            0 :     }
     965              : }
     966              : 
     967              : impl From<CreateImageLayersError> for FlushLayerError {
     968            0 :     fn from(e: CreateImageLayersError) -> Self {
     969            0 :         match e {
     970            0 :             CreateImageLayersError::Cancelled => FlushLayerError::Cancelled,
     971            0 :             any => FlushLayerError::CreateImageLayersError(Arc::new(any)),
     972              :         }
     973            0 :     }
     974              : }
     975              : 
     976              : impl From<PageReconstructError> for CreateImageLayersError {
     977            0 :     fn from(e: PageReconstructError) -> Self {
     978            0 :         match e {
     979            0 :             PageReconstructError::Cancelled => CreateImageLayersError::Cancelled,
     980            0 :             _ => CreateImageLayersError::PageReconstructError(e),
     981              :         }
     982            0 :     }
     983              : }
     984              : 
     985              : impl From<GetVectoredError> for CreateImageLayersError {
     986            0 :     fn from(e: GetVectoredError) -> Self {
     987            0 :         match e {
     988            0 :             GetVectoredError::Cancelled => CreateImageLayersError::Cancelled,
     989            0 :             _ => CreateImageLayersError::GetVectoredError(e),
     990              :         }
     991            0 :     }
     992              : }
     993              : 
     994              : impl From<GetVectoredError> for PageReconstructError {
     995           12 :     fn from(e: GetVectoredError) -> Self {
     996           12 :         match e {
     997            0 :             GetVectoredError::Cancelled => PageReconstructError::Cancelled,
     998            0 :             GetVectoredError::InvalidLsn(_) => PageReconstructError::Other(anyhow!("Invalid LSN")),
     999            0 :             err @ GetVectoredError::Oversized(_) => PageReconstructError::Other(err.into()),
    1000            8 :             GetVectoredError::MissingKey(err) => PageReconstructError::MissingKey(err),
    1001            4 :             GetVectoredError::GetReadyAncestorError(err) => PageReconstructError::from(err),
    1002            0 :             GetVectoredError::Other(err) => PageReconstructError::Other(err),
    1003              :         }
    1004           12 :     }
    1005              : }
    1006              : 
    1007              : impl From<GetReadyAncestorError> for PageReconstructError {
    1008            4 :     fn from(e: GetReadyAncestorError) -> Self {
    1009              :         use GetReadyAncestorError::*;
    1010            4 :         match e {
    1011            0 :             AncestorLsnTimeout(wait_err) => PageReconstructError::AncestorLsnTimeout(wait_err),
    1012            4 :             bad_state @ BadState { .. } => PageReconstructError::Other(anyhow::anyhow!(bad_state)),
    1013            0 :             Cancelled => PageReconstructError::Cancelled,
    1014              :         }
    1015            4 :     }
    1016              : }
    1017              : 
    1018              : pub(crate) enum WaitLsnTimeout {
    1019              :     Custom(Duration),
    1020              :     // Use the [`PageServerConf::wait_lsn_timeout`] default
    1021              :     Default,
    1022              : }
    1023              : 
    1024              : pub(crate) enum WaitLsnWaiter<'a> {
    1025              :     Timeline(&'a Timeline),
    1026              :     Tenant,
    1027              :     PageService,
    1028              :     HttpEndpoint,
    1029              : }
    1030              : 
    1031              : /// Argument to [`Timeline::shutdown`].
    1032              : #[derive(Debug, Clone, Copy)]
    1033              : pub(crate) enum ShutdownMode {
    1034              :     /// Graceful shutdown, may do a lot of I/O as we flush any open layers to disk and then
    1035              :     /// also to remote storage.  This method can easily take multiple seconds for a busy timeline.
    1036              :     ///
    1037              :     /// While we are flushing, we continue to accept read I/O for LSNs ingested before
    1038              :     /// the call to [`Timeline::shutdown`].
    1039              :     FreezeAndFlush,
    1040              :     /// Only flush the layers to the remote storage without freezing any open layers. Flush the deletion
    1041              :     /// queue. This is the mode used by ancestor detach and any other operations that reloads a tenant
    1042              :     /// but not increasing the generation number. Note that this mode cannot be used at tenant shutdown,
    1043              :     /// as flushing the deletion queue at that time will cause shutdown-in-progress errors.
    1044              :     Reload,
    1045              :     /// Shut down immediately, without waiting for any open layers to flush.
    1046              :     Hard,
    1047              : }
    1048              : 
    1049              : enum ImageLayerCreationOutcome {
    1050              :     /// We generated an image layer
    1051              :     Generated {
    1052              :         unfinished_image_layer: ImageLayerWriter,
    1053              :     },
    1054              :     /// The key range is empty
    1055              :     Empty,
    1056              :     /// (Only used in metadata image layer creation), after reading the metadata keys, we decide to skip
    1057              :     /// the image layer creation.
    1058              :     Skip,
    1059              : }
    1060              : 
    1061              : /// Public interface functions
    1062              : impl Timeline {
    1063              :     /// Get the LSN where this branch was created
    1064          776 :     pub(crate) fn get_ancestor_lsn(&self) -> Lsn {
    1065          776 :         self.ancestor_lsn
    1066          776 :     }
    1067              : 
    1068              :     /// Get the ancestor's timeline id
    1069           24 :     pub(crate) fn get_ancestor_timeline_id(&self) -> Option<TimelineId> {
    1070           24 :         self.ancestor_timeline
    1071           24 :             .as_ref()
    1072           24 :             .map(|ancestor| ancestor.timeline_id)
    1073           24 :     }
    1074              : 
    1075              :     /// Get the ancestor timeline
    1076            4 :     pub(crate) fn ancestor_timeline(&self) -> Option<&Arc<Timeline>> {
    1077            4 :         self.ancestor_timeline.as_ref()
    1078            4 :     }
    1079              : 
    1080              :     /// Get the bytes written since the PITR cutoff on this branch, and
    1081              :     /// whether this branch's ancestor_lsn is within its parent's PITR.
    1082            0 :     pub(crate) fn get_pitr_history_stats(&self) -> (u64, bool) {
    1083            0 :         let gc_info = self.gc_info.read().unwrap();
    1084            0 :         let history = self
    1085            0 :             .get_last_record_lsn()
    1086            0 :             .checked_sub(gc_info.cutoffs.time)
    1087            0 :             .unwrap_or(Lsn(0))
    1088            0 :             .0;
    1089            0 :         (history, gc_info.within_ancestor_pitr)
    1090            0 :     }
    1091              : 
    1092              :     /// Read timeline's GC cutoff: this is the LSN at which GC has started to happen
    1093      1707589 :     pub(crate) fn get_applied_gc_cutoff_lsn(&self) -> RcuReadGuard<Lsn> {
    1094      1707589 :         self.applied_gc_cutoff_lsn.read()
    1095      1707589 :     }
    1096              : 
    1097              :     /// Read timeline's planned GC cutoff: this is the logical end of history that users
    1098              :     /// are allowed to read (based on configured PITR), even if physically we have more history.
    1099            0 :     pub(crate) fn get_gc_cutoff_lsn(&self) -> Lsn {
    1100            0 :         self.gc_info.read().unwrap().cutoffs.time
    1101            0 :     }
    1102              : 
    1103              :     /// Look up given page version.
    1104              :     ///
    1105              :     /// If a remote layer file is needed, it is downloaded as part of this
    1106              :     /// call.
    1107              :     ///
    1108              :     /// This method enforces [`Self::pagestream_throttle`] internally.
    1109              :     ///
    1110              :     /// NOTE: It is considered an error to 'get' a key that doesn't exist. The
    1111              :     /// abstraction above this needs to store suitable metadata to track what
    1112              :     /// data exists with what keys, in separate metadata entries. If a
    1113              :     /// non-existent key is requested, we may incorrectly return a value from
    1114              :     /// an ancestor branch, for example, or waste a lot of cycles chasing the
    1115              :     /// non-existing key.
    1116              :     ///
    1117              :     /// # Cancel-Safety
    1118              :     ///
    1119              :     /// This method is cancellation-safe.
    1120              :     #[inline(always)]
    1121      1215242 :     pub(crate) async fn get(
    1122      1215242 :         &self,
    1123      1215242 :         key: Key,
    1124      1215242 :         lsn: Lsn,
    1125      1215242 :         ctx: &RequestContext,
    1126      1215242 :     ) -> Result<Bytes, PageReconstructError> {
    1127      1215242 :         if !lsn.is_valid() {
    1128            0 :             return Err(PageReconstructError::Other(anyhow::anyhow!("Invalid LSN")));
    1129      1215242 :         }
    1130      1215242 : 
    1131      1215242 :         // This check is debug-only because of the cost of hashing, and because it's a double-check: we
    1132      1215242 :         // already checked the key against the shard_identity when looking up the Timeline from
    1133      1215242 :         // page_service.
    1134      1215242 :         debug_assert!(!self.shard_identity.is_key_disposable(&key));
    1135              : 
    1136      1215242 :         let keyspace = KeySpace {
    1137      1215242 :             ranges: vec![key..key.next()],
    1138      1215242 :         };
    1139      1215242 : 
    1140      1215242 :         let mut reconstruct_state = ValuesReconstructState::new(IoConcurrency::sequential());
    1141              : 
    1142      1215242 :         let vectored_res = self
    1143      1215242 :             .get_vectored_impl(keyspace.clone(), lsn, &mut reconstruct_state, ctx)
    1144      1215242 :             .await;
    1145              : 
    1146      1215242 :         let key_value = vectored_res?.pop_first();
    1147      1215230 :         match key_value {
    1148      1215206 :             Some((got_key, value)) => {
    1149      1215206 :                 if got_key != key {
    1150            0 :                     error!(
    1151            0 :                         "Expected {}, but singular vectored get returned {}",
    1152              :                         key, got_key
    1153              :                     );
    1154            0 :                     Err(PageReconstructError::Other(anyhow!(
    1155            0 :                         "Singular vectored get returned wrong key"
    1156            0 :                     )))
    1157              :                 } else {
    1158      1215206 :                     value
    1159              :                 }
    1160              :             }
    1161           24 :             None => Err(PageReconstructError::MissingKey(MissingKeyError {
    1162           24 :                 key,
    1163           24 :                 shard: self.shard_identity.get_shard_number(&key),
    1164           24 :                 cont_lsn: Lsn(0),
    1165           24 :                 request_lsn: lsn,
    1166           24 :                 ancestor_lsn: None,
    1167           24 :                 backtrace: None,
    1168           24 :                 read_path: None,
    1169           24 :             })),
    1170              :         }
    1171      1215242 :     }
    1172              : 
    1173              :     pub(crate) const MAX_GET_VECTORED_KEYS: u64 = 32;
    1174              :     pub(crate) const LAYERS_VISITED_WARN_THRESHOLD: u32 = 100;
    1175              : 
    1176              :     /// Look up multiple page versions at a given LSN
    1177              :     ///
    1178              :     /// This naive implementation will be replaced with a more efficient one
    1179              :     /// which actually vectorizes the read path.
    1180        39338 :     pub(crate) async fn get_vectored(
    1181        39338 :         &self,
    1182        39338 :         keyspace: KeySpace,
    1183        39338 :         lsn: Lsn,
    1184        39338 :         io_concurrency: super::storage_layer::IoConcurrency,
    1185        39338 :         ctx: &RequestContext,
    1186        39338 :     ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
    1187        39338 :         if !lsn.is_valid() {
    1188            0 :             return Err(GetVectoredError::InvalidLsn(lsn));
    1189        39338 :         }
    1190        39338 : 
    1191        39338 :         let key_count = keyspace.total_raw_size().try_into().unwrap();
    1192        39338 :         if key_count > Timeline::MAX_GET_VECTORED_KEYS {
    1193            0 :             return Err(GetVectoredError::Oversized(key_count));
    1194        39338 :         }
    1195              : 
    1196        78676 :         for range in &keyspace.ranges {
    1197        39338 :             let mut key = range.start;
    1198        79141 :             while key != range.end {
    1199        39803 :                 assert!(!self.shard_identity.is_key_disposable(&key));
    1200        39803 :                 key = key.next();
    1201              :             }
    1202              :         }
    1203              : 
    1204        39338 :         trace!(
    1205            0 :             "get vectored request for {:?}@{} from task kind {:?}",
    1206            0 :             keyspace,
    1207            0 :             lsn,
    1208            0 :             ctx.task_kind(),
    1209              :         );
    1210              : 
    1211        39338 :         let start = crate::metrics::GET_VECTORED_LATENCY
    1212        39338 :             .for_task_kind(ctx.task_kind())
    1213        39338 :             .map(|metric| (metric, Instant::now()));
    1214              : 
    1215        39338 :         let res = self
    1216        39338 :             .get_vectored_impl(
    1217        39338 :                 keyspace.clone(),
    1218        39338 :                 lsn,
    1219        39338 :                 &mut ValuesReconstructState::new(io_concurrency),
    1220        39338 :                 ctx,
    1221        39338 :             )
    1222        39338 :             .await;
    1223              : 
    1224        39338 :         if let Some((metric, start)) = start {
    1225            0 :             let elapsed = start.elapsed();
    1226            0 :             metric.observe(elapsed.as_secs_f64());
    1227        39338 :         }
    1228              : 
    1229        39338 :         res
    1230        39338 :     }
    1231              : 
    1232              :     /// Scan the keyspace and return all existing key-values in the keyspace. This currently uses vectored
    1233              :     /// get underlying. Normal vectored get would throw an error when a key in the keyspace is not found
    1234              :     /// during the search, but for the scan interface, it returns all existing key-value pairs, and does
    1235              :     /// not expect each single key in the key space will be found. The semantics is closer to the RocksDB
    1236              :     /// scan iterator interface. We could optimize this interface later to avoid some checks in the vectored
    1237              :     /// get path to maintain and split the probing and to-be-probe keyspace. We also need to ensure that
    1238              :     /// the scan operation will not cause OOM in the future.
    1239           24 :     pub(crate) async fn scan(
    1240           24 :         &self,
    1241           24 :         keyspace: KeySpace,
    1242           24 :         lsn: Lsn,
    1243           24 :         ctx: &RequestContext,
    1244           24 :         io_concurrency: super::storage_layer::IoConcurrency,
    1245           24 :     ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
    1246           24 :         if !lsn.is_valid() {
    1247            0 :             return Err(GetVectoredError::InvalidLsn(lsn));
    1248           24 :         }
    1249           24 : 
    1250           24 :         trace!(
    1251            0 :             "key-value scan request for {:?}@{} from task kind {:?}",
    1252            0 :             keyspace,
    1253            0 :             lsn,
    1254            0 :             ctx.task_kind()
    1255              :         );
    1256              : 
    1257              :         // We should generalize this into Keyspace::contains in the future.
    1258           48 :         for range in &keyspace.ranges {
    1259           24 :             if range.start.field1 < METADATA_KEY_BEGIN_PREFIX
    1260           24 :                 || range.end.field1 > METADATA_KEY_END_PREFIX
    1261              :             {
    1262            0 :                 return Err(GetVectoredError::Other(anyhow::anyhow!(
    1263            0 :                     "only metadata keyspace can be scanned"
    1264            0 :                 )));
    1265           24 :             }
    1266              :         }
    1267              : 
    1268           24 :         let start = crate::metrics::SCAN_LATENCY
    1269           24 :             .for_task_kind(ctx.task_kind())
    1270           24 :             .map(ScanLatencyOngoingRecording::start_recording);
    1271              : 
    1272           24 :         let vectored_res = self
    1273           24 :             .get_vectored_impl(
    1274           24 :                 keyspace.clone(),
    1275           24 :                 lsn,
    1276           24 :                 &mut ValuesReconstructState::new(io_concurrency),
    1277           24 :                 ctx,
    1278           24 :             )
    1279           24 :             .await;
    1280              : 
    1281           24 :         if let Some(recording) = start {
    1282            0 :             recording.observe();
    1283           24 :         }
    1284              : 
    1285           24 :         vectored_res
    1286           24 :     }
    1287              : 
    1288      1255313 :     pub(super) async fn get_vectored_impl(
    1289      1255313 :         &self,
    1290      1255313 :         keyspace: KeySpace,
    1291      1255313 :         lsn: Lsn,
    1292      1255313 :         reconstruct_state: &mut ValuesReconstructState,
    1293      1255313 :         ctx: &RequestContext,
    1294      1255313 :     ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
    1295      1255313 :         let read_path = if self.conf.enable_read_path_debugging {
    1296      1255313 :             Some(ReadPath::new(keyspace.clone(), lsn))
    1297              :         } else {
    1298            0 :             None
    1299              :         };
    1300      1255313 :         reconstruct_state.read_path = read_path;
    1301              : 
    1302      1255313 :         let traversal_res: Result<(), _> = self
    1303      1255313 :             .get_vectored_reconstruct_data(keyspace.clone(), lsn, reconstruct_state, ctx)
    1304      1255313 :             .await;
    1305      1255313 :         if let Err(err) = traversal_res {
    1306              :             // Wait for all the spawned IOs to complete.
    1307              :             // See comments on `spawn_io` inside `storage_layer` for more details.
    1308           32 :             let mut collect_futs = std::mem::take(&mut reconstruct_state.keys)
    1309           32 :                 .into_values()
    1310           32 :                 .map(|state| state.collect_pending_ios())
    1311           32 :                 .collect::<FuturesUnordered<_>>();
    1312           32 :             while collect_futs.next().await.is_some() {}
    1313           32 :             return Err(err);
    1314      1255281 :         };
    1315      1255281 : 
    1316      1255281 :         let layers_visited = reconstruct_state.get_layers_visited();
    1317      1255281 : 
    1318      1255281 :         let futs = FuturesUnordered::new();
    1319      1327945 :         for (key, state) in std::mem::take(&mut reconstruct_state.keys) {
    1320      1327945 :             futs.push({
    1321      1327945 :                 let walredo_self = self.myself.upgrade().expect("&self method holds the arc");
    1322      1327945 :                 async move {
    1323      1327945 :                     assert_eq!(state.situation, ValueReconstructSituation::Complete);
    1324              : 
    1325      1327945 :                     let converted = match state.collect_pending_ios().await {
    1326      1327945 :                         Ok(ok) => ok,
    1327            0 :                         Err(err) => {
    1328            0 :                             return (key, Err(err));
    1329              :                         }
    1330              :                     };
    1331      1327945 :                     DELTAS_PER_READ_GLOBAL.observe(converted.num_deltas() as f64);
    1332      1327945 : 
    1333      1327945 :                     // The walredo module expects the records to be descending in terms of Lsn.
    1334      1327945 :                     // And we submit the IOs in that order, so, there shuold be no need to sort here.
    1335      1327945 :                     debug_assert!(
    1336      1327945 :                         converted
    1337      1327945 :                             .records
    1338      1327945 :                             .is_sorted_by_key(|(lsn, _)| std::cmp::Reverse(*lsn)),
    1339            0 :                         "{converted:?}"
    1340              :                     );
    1341              : 
    1342              :                     (
    1343      1327945 :                         key,
    1344      1327945 :                         walredo_self.reconstruct_value(key, lsn, converted).await,
    1345              :                     )
    1346      1327945 :                 }
    1347      1327945 :             });
    1348      1327945 :         }
    1349              : 
    1350      1255281 :         let results = futs
    1351      1255281 :             .collect::<BTreeMap<Key, Result<Bytes, PageReconstructError>>>()
    1352      1255281 :             .await;
    1353              : 
    1354              :         // For aux file keys (v1 or v2) the vectored read path does not return an error
    1355              :         // when they're missing. Instead they are omitted from the resulting btree
    1356              :         // (this is a requirement, not a bug). Skip updating the metric in these cases
    1357              :         // to avoid infinite results.
    1358      1255281 :         if !results.is_empty() {
    1359              :             // Record the total number of layers visited towards each key in the batch. While some
    1360              :             // layers may not intersect with a given read, and the cost of layer visits are
    1361              :             // amortized across the batch, each visited layer contributes directly to the observed
    1362              :             // latency for every read in the batch, which is what we care about.
    1363      1254792 :             if layers_visited >= Self::LAYERS_VISITED_WARN_THRESHOLD {
    1364            0 :                 static LOG_PACER: Lazy<Mutex<RateLimit>> =
    1365            0 :                     Lazy::new(|| Mutex::new(RateLimit::new(Duration::from_secs(60))));
    1366            0 :                 LOG_PACER.lock().unwrap().call(|| {
    1367            0 :                     let num_keys = keyspace.total_raw_size();
    1368            0 :                     let num_pages = results.len();
    1369            0 :                     tracing::info!(
    1370            0 :                       shard_id = %self.tenant_shard_id.shard_slug(),
    1371            0 :                       lsn = %lsn,
    1372            0 :                       "Vectored read for {keyspace} visited {layers_visited} layers. Returned {num_pages}/{num_keys} pages.",
    1373              :                     );
    1374            0 :                 });
    1375      1254792 :             }
    1376              : 
    1377      2582737 :             for _ in &results {
    1378      1327945 :                 self.metrics.layers_per_read.observe(layers_visited as f64);
    1379      1327945 :                 LAYERS_PER_READ_GLOBAL.observe(layers_visited as f64);
    1380      1327945 :             }
    1381          489 :         }
    1382              : 
    1383      1255281 :         Ok(results)
    1384      1255313 :     }
    1385              : 
    1386              :     /// Get last or prev record separately. Same as get_last_record_rlsn().last/prev.
    1387       548229 :     pub(crate) fn get_last_record_lsn(&self) -> Lsn {
    1388       548229 :         self.last_record_lsn.load().last
    1389       548229 :     }
    1390              : 
    1391            0 :     pub(crate) fn get_prev_record_lsn(&self) -> Lsn {
    1392            0 :         self.last_record_lsn.load().prev
    1393            0 :     }
    1394              : 
    1395              :     /// Atomically get both last and prev.
    1396          456 :     pub(crate) fn get_last_record_rlsn(&self) -> RecordLsn {
    1397          456 :         self.last_record_lsn.load()
    1398          456 :     }
    1399              : 
    1400              :     /// Subscribe to callers of wait_lsn(). The value of the channel is None if there are no
    1401              :     /// wait_lsn() calls in progress, and Some(Lsn) if there is an active waiter for wait_lsn().
    1402            0 :     pub(crate) fn subscribe_for_wait_lsn_updates(&self) -> watch::Receiver<Option<Lsn>> {
    1403            0 :         self.last_record_lsn.status_receiver()
    1404            0 :     }
    1405              : 
    1406         1657 :     pub(crate) fn get_disk_consistent_lsn(&self) -> Lsn {
    1407         1657 :         self.disk_consistent_lsn.load()
    1408         1657 :     }
    1409              : 
    1410              :     /// remote_consistent_lsn from the perspective of the tenant's current generation,
    1411              :     /// not validated with control plane yet.
    1412              :     /// See [`Self::get_remote_consistent_lsn_visible`].
    1413            0 :     pub(crate) fn get_remote_consistent_lsn_projected(&self) -> Option<Lsn> {
    1414            0 :         self.remote_client.remote_consistent_lsn_projected()
    1415            0 :     }
    1416              : 
    1417              :     /// remote_consistent_lsn which the tenant is guaranteed not to go backward from,
    1418              :     /// i.e. a value of remote_consistent_lsn_projected which has undergone
    1419              :     /// generation validation in the deletion queue.
    1420            0 :     pub(crate) fn get_remote_consistent_lsn_visible(&self) -> Option<Lsn> {
    1421            0 :         self.remote_client.remote_consistent_lsn_visible()
    1422            0 :     }
    1423              : 
    1424              :     /// The sum of the file size of all historic layers in the layer map.
    1425              :     /// This method makes no distinction between local and remote layers.
    1426              :     /// Hence, the result **does not represent local filesystem usage**.
    1427            0 :     pub(crate) async fn layer_size_sum(&self) -> u64 {
    1428            0 :         let guard = self.layers.read().await;
    1429            0 :         guard.layer_size_sum()
    1430            0 :     }
    1431              : 
    1432            0 :     pub(crate) fn resident_physical_size(&self) -> u64 {
    1433            0 :         self.metrics.resident_physical_size_get()
    1434            0 :     }
    1435              : 
    1436            0 :     pub(crate) fn get_directory_metrics(&self) -> [u64; DirectoryKind::KINDS_NUM] {
    1437            0 :         array::from_fn(|idx| self.directory_metrics[idx].load(AtomicOrdering::Relaxed))
    1438            0 :     }
    1439              : 
    1440              :     ///
    1441              :     /// Wait until WAL has been received and processed up to this LSN.
    1442              :     ///
    1443              :     /// You should call this before any of the other get_* or list_* functions. Calling
    1444              :     /// those functions with an LSN that has been processed yet is an error.
    1445              :     ///
    1446       451672 :     pub(crate) async fn wait_lsn(
    1447       451672 :         &self,
    1448       451672 :         lsn: Lsn,
    1449       451672 :         who_is_waiting: WaitLsnWaiter<'_>,
    1450       451672 :         timeout: WaitLsnTimeout,
    1451       451672 :         ctx: &RequestContext, /* Prepare for use by cancellation */
    1452       451672 :     ) -> Result<(), WaitLsnError> {
    1453       451672 :         let state = self.current_state();
    1454       451672 :         if self.cancel.is_cancelled() || matches!(state, TimelineState::Stopping) {
    1455            0 :             return Err(WaitLsnError::Shutdown);
    1456       451672 :         } else if !matches!(state, TimelineState::Active) {
    1457            0 :             return Err(WaitLsnError::BadState(state));
    1458       451672 :         }
    1459       451672 : 
    1460       451672 :         if cfg!(debug_assertions) {
    1461       451672 :             match ctx.task_kind() {
    1462              :                 TaskKind::WalReceiverManager
    1463              :                 | TaskKind::WalReceiverConnectionHandler
    1464              :                 | TaskKind::WalReceiverConnectionPoller => {
    1465            0 :                     let is_myself = match who_is_waiting {
    1466            0 :                         WaitLsnWaiter::Timeline(waiter) => Weak::ptr_eq(&waiter.myself, &self.myself),
    1467            0 :                         WaitLsnWaiter::Tenant | WaitLsnWaiter::PageService | WaitLsnWaiter::HttpEndpoint => unreachable!("tenant or page_service context are not expected to have task kind {:?}", ctx.task_kind()),
    1468              :                     };
    1469            0 :                     if is_myself {
    1470            0 :                         if let Err(current) = self.last_record_lsn.would_wait_for(lsn) {
    1471              :                             // walingest is the only one that can advance last_record_lsn; it should make sure to never reach here
    1472            0 :                             panic!("this timeline's walingest task is calling wait_lsn({lsn}) but we only have last_record_lsn={current}; would deadlock");
    1473            0 :                         }
    1474            0 :                     } else {
    1475            0 :                         // if another  timeline's  is waiting for us, there's no deadlock risk because
    1476            0 :                         // our walreceiver task can make progress independent of theirs
    1477            0 :                     }
    1478              :                 }
    1479       451672 :                 _ => {}
    1480              :             }
    1481            0 :         }
    1482              : 
    1483       451672 :         let timeout = match timeout {
    1484            0 :             WaitLsnTimeout::Custom(t) => t,
    1485       451672 :             WaitLsnTimeout::Default => self.conf.wait_lsn_timeout,
    1486              :         };
    1487              : 
    1488       451672 :         let _timer = crate::metrics::WAIT_LSN_TIME.start_timer();
    1489       451672 : 
    1490       451672 :         match self.last_record_lsn.wait_for_timeout(lsn, timeout).await {
    1491       451672 :             Ok(()) => Ok(()),
    1492            0 :             Err(e) => {
    1493              :                 use utils::seqwait::SeqWaitError::*;
    1494            0 :                 match e {
    1495            0 :                     Shutdown => Err(WaitLsnError::Shutdown),
    1496              :                     Timeout => {
    1497              :                         // don't count the time spent waiting for lock below, and also in walreceiver.status(), towards the wait_lsn_time_histo
    1498            0 :                         drop(_timer);
    1499            0 :                         let walreceiver_status = self.walreceiver_status();
    1500            0 :                         Err(WaitLsnError::Timeout(format!(
    1501            0 :                         "Timed out while waiting for WAL record at LSN {} to arrive, last_record_lsn {} disk consistent LSN={}, WalReceiver status: {}",
    1502            0 :                         lsn,
    1503            0 :                         self.get_last_record_lsn(),
    1504            0 :                         self.get_disk_consistent_lsn(),
    1505            0 :                         walreceiver_status,
    1506            0 :                     )))
    1507              :                     }
    1508              :                 }
    1509              :             }
    1510              :         }
    1511       451672 :     }
    1512              : 
    1513            0 :     pub(crate) fn walreceiver_status(&self) -> String {
    1514            0 :         match &*self.walreceiver.lock().unwrap() {
    1515            0 :             None => "stopping or stopped".to_string(),
    1516            0 :             Some(walreceiver) => match walreceiver.status() {
    1517            0 :                 Some(status) => status.to_human_readable_string(),
    1518            0 :                 None => "Not active".to_string(),
    1519              :             },
    1520              :         }
    1521            0 :     }
    1522              : 
    1523              :     /// Check that it is valid to request operations with that lsn.
    1524          464 :     pub(crate) fn check_lsn_is_in_scope(
    1525          464 :         &self,
    1526          464 :         lsn: Lsn,
    1527          464 :         latest_gc_cutoff_lsn: &RcuReadGuard<Lsn>,
    1528          464 :     ) -> anyhow::Result<()> {
    1529          464 :         ensure!(
    1530          464 :             lsn >= **latest_gc_cutoff_lsn,
    1531            8 :             "LSN {} is earlier than latest GC cutoff {} (we might've already garbage collected needed data)",
    1532            8 :             lsn,
    1533            8 :             **latest_gc_cutoff_lsn,
    1534              :         );
    1535          456 :         Ok(())
    1536          464 :     }
    1537              : 
    1538              :     /// Initializes an LSN lease. The function will return an error if the requested LSN is less than the `latest_gc_cutoff_lsn`.
    1539           20 :     pub(crate) fn init_lsn_lease(
    1540           20 :         &self,
    1541           20 :         lsn: Lsn,
    1542           20 :         length: Duration,
    1543           20 :         ctx: &RequestContext,
    1544           20 :     ) -> anyhow::Result<LsnLease> {
    1545           20 :         self.make_lsn_lease(lsn, length, true, ctx)
    1546           20 :     }
    1547              : 
    1548              :     /// Renews a lease at a particular LSN. The requested LSN is not validated against the `latest_gc_cutoff_lsn` when we are in the grace period.
    1549            8 :     pub(crate) fn renew_lsn_lease(
    1550            8 :         &self,
    1551            8 :         lsn: Lsn,
    1552            8 :         length: Duration,
    1553            8 :         ctx: &RequestContext,
    1554            8 :     ) -> anyhow::Result<LsnLease> {
    1555            8 :         self.make_lsn_lease(lsn, length, false, ctx)
    1556            8 :     }
    1557              : 
    1558              :     /// Obtains a temporary lease blocking garbage collection for the given LSN.
    1559              :     ///
    1560              :     /// If we are in `AttachedSingle` mode and is not blocked by the lsn lease deadline, this function will error
    1561              :     /// if the requesting LSN is less than the `latest_gc_cutoff_lsn` and there is no existing request present.
    1562              :     ///
    1563              :     /// If there is an existing lease in the map, the lease will be renewed only if the request extends the lease.
    1564              :     /// The returned lease is therefore the maximum between the existing lease and the requesting lease.
    1565           28 :     fn make_lsn_lease(
    1566           28 :         &self,
    1567           28 :         lsn: Lsn,
    1568           28 :         length: Duration,
    1569           28 :         init: bool,
    1570           28 :         _ctx: &RequestContext,
    1571           28 :     ) -> anyhow::Result<LsnLease> {
    1572           24 :         let lease = {
    1573              :             // Normalize the requested LSN to be aligned, and move to the first record
    1574              :             // if it points to the beginning of the page (header).
    1575           28 :             let lsn = xlog_utils::normalize_lsn(lsn, WAL_SEGMENT_SIZE);
    1576           28 : 
    1577           28 :             let mut gc_info = self.gc_info.write().unwrap();
    1578           28 :             let planned_cutoff = gc_info.min_cutoff();
    1579           28 : 
    1580           28 :             let valid_until = SystemTime::now() + length;
    1581           28 : 
    1582           28 :             let entry = gc_info.leases.entry(lsn);
    1583           28 : 
    1584           28 :             match entry {
    1585           12 :                 Entry::Occupied(mut occupied) => {
    1586           12 :                     let existing_lease = occupied.get_mut();
    1587           12 :                     if valid_until > existing_lease.valid_until {
    1588            4 :                         existing_lease.valid_until = valid_until;
    1589            4 :                         let dt: DateTime<Utc> = valid_until.into();
    1590            4 :                         info!("lease extended to {}", dt);
    1591              :                     } else {
    1592            8 :                         let dt: DateTime<Utc> = existing_lease.valid_until.into();
    1593            8 :                         info!("existing lease covers greater length, valid until {}", dt);
    1594              :                     }
    1595              : 
    1596           12 :                     existing_lease.clone()
    1597              :                 }
    1598           16 :                 Entry::Vacant(vacant) => {
    1599              :                     // Reject already GC-ed LSN if we are in AttachedSingle and
    1600              :                     // not blocked by the lsn lease deadline.
    1601           16 :                     let validate = {
    1602           16 :                         let conf = self.tenant_conf.load();
    1603           16 :                         conf.location.attach_mode == AttachmentMode::Single
    1604           16 :                             && !conf.is_gc_blocked_by_lsn_lease_deadline()
    1605              :                     };
    1606              : 
    1607           16 :                     if init || validate {
    1608           16 :                         let latest_gc_cutoff_lsn = self.get_applied_gc_cutoff_lsn();
    1609           16 :                         if lsn < *latest_gc_cutoff_lsn {
    1610            4 :                             bail!("tried to request an lsn lease for an lsn below the latest gc cutoff. requested at {} gc cutoff {}", lsn, *latest_gc_cutoff_lsn);
    1611           12 :                         }
    1612           12 :                         if lsn < planned_cutoff {
    1613            0 :                             bail!("tried to request an lsn lease for an lsn below the planned gc cutoff. requested at {} planned gc cutoff {}", lsn, planned_cutoff);
    1614           12 :                         }
    1615            0 :                     }
    1616              : 
    1617           12 :                     let dt: DateTime<Utc> = valid_until.into();
    1618           12 :                     info!("lease created, valid until {}", dt);
    1619           12 :                     vacant.insert(LsnLease { valid_until }).clone()
    1620              :                 }
    1621              :             }
    1622              :         };
    1623              : 
    1624           24 :         Ok(lease)
    1625           28 :     }
    1626              : 
    1627              :     /// Freeze the current open in-memory layer. It will be written to disk on next iteration.
    1628              :     /// Returns the flush request ID which can be awaited with wait_flush_completion().
    1629              :     #[instrument(skip(self), fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id))]
    1630              :     pub(crate) async fn freeze(&self) -> Result<u64, FlushLayerError> {
    1631              :         self.freeze0().await
    1632              :     }
    1633              : 
    1634              :     /// Freeze and flush the open in-memory layer, waiting for it to be written to disk.
    1635              :     #[instrument(skip(self), fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id))]
    1636              :     pub(crate) async fn freeze_and_flush(&self) -> Result<(), FlushLayerError> {
    1637              :         self.freeze_and_flush0().await
    1638              :     }
    1639              : 
    1640              :     /// Freeze the current open in-memory layer. It will be written to disk on next iteration.
    1641              :     /// Returns the flush request ID which can be awaited with wait_flush_completion().
    1642         2245 :     pub(crate) async fn freeze0(&self) -> Result<u64, FlushLayerError> {
    1643         2245 :         let mut g = self.write_lock.lock().await;
    1644         2245 :         let to_lsn = self.get_last_record_lsn();
    1645         2245 :         self.freeze_inmem_layer_at(to_lsn, &mut g).await
    1646         2245 :     }
    1647              : 
    1648              :     // This exists to provide a non-span creating version of `freeze_and_flush` we can call without
    1649              :     // polluting the span hierarchy.
    1650         2245 :     pub(crate) async fn freeze_and_flush0(&self) -> Result<(), FlushLayerError> {
    1651         2245 :         let token = self.freeze0().await?;
    1652         2245 :         self.wait_flush_completion(token).await
    1653         2245 :     }
    1654              : 
    1655              :     // Check if an open ephemeral layer should be closed: this provides
    1656              :     // background enforcement of checkpoint interval if there is no active WAL receiver, to avoid keeping
    1657              :     // an ephemeral layer open forever when idle.  It also freezes layers if the global limit on
    1658              :     // ephemeral layer bytes has been breached.
    1659            0 :     pub(super) async fn maybe_freeze_ephemeral_layer(&self) {
    1660            0 :         let Ok(mut write_guard) = self.write_lock.try_lock() else {
    1661              :             // If the write lock is held, there is an active wal receiver: rolling open layers
    1662              :             // is their responsibility while they hold this lock.
    1663            0 :             return;
    1664              :         };
    1665              : 
    1666              :         // FIXME: why not early exit? because before #7927 the state would had been cleared every
    1667              :         // time, and this was missed.
    1668              :         // if write_guard.is_none() { return; }
    1669              : 
    1670            0 :         let Ok(layers_guard) = self.layers.try_read() else {
    1671              :             // Don't block if the layer lock is busy
    1672            0 :             return;
    1673              :         };
    1674              : 
    1675            0 :         let Ok(lm) = layers_guard.layer_map() else {
    1676            0 :             return;
    1677              :         };
    1678              : 
    1679            0 :         let Some(open_layer) = &lm.open_layer else {
    1680              :             // If there is no open layer, we have no layer freezing to do.  However, we might need to generate
    1681              :             // some updates to disk_consistent_lsn and remote_consistent_lsn, in case we ingested some WAL regions
    1682              :             // that didn't result in writes to this shard.
    1683              : 
    1684              :             // Must not hold the layers lock while waiting for a flush.
    1685            0 :             drop(layers_guard);
    1686            0 : 
    1687            0 :             let last_record_lsn = self.get_last_record_lsn();
    1688            0 :             let disk_consistent_lsn = self.get_disk_consistent_lsn();
    1689            0 :             if last_record_lsn > disk_consistent_lsn {
    1690              :                 // We have no open layer, but disk_consistent_lsn is behind the last record: this indicates
    1691              :                 // we are a sharded tenant and have skipped some WAL
    1692            0 :                 let last_freeze_ts = *self.last_freeze_ts.read().unwrap();
    1693            0 :                 if last_freeze_ts.elapsed() >= self.get_checkpoint_timeout() {
    1694              :                     // Only do this if have been layer-less longer than get_checkpoint_timeout, so that a shard
    1695              :                     // without any data ingested (yet) doesn't write a remote index as soon as it
    1696              :                     // sees its LSN advance: we only do this if we've been layer-less
    1697              :                     // for some time.
    1698            0 :                     tracing::debug!(
    1699            0 :                         "Advancing disk_consistent_lsn past WAL ingest gap {} -> {}",
    1700              :                         disk_consistent_lsn,
    1701              :                         last_record_lsn
    1702              :                     );
    1703              : 
    1704              :                     // The flush loop will update remote consistent LSN as well as disk consistent LSN.
    1705              :                     // We know there is no open layer, so we can request freezing without actually
    1706              :                     // freezing anything. This is true even if we have dropped the layers_guard, we
    1707              :                     // still hold the write_guard.
    1708            0 :                     let _ = async {
    1709            0 :                         let token = self
    1710            0 :                             .freeze_inmem_layer_at(last_record_lsn, &mut write_guard)
    1711            0 :                             .await?;
    1712            0 :                         self.wait_flush_completion(token).await
    1713            0 :                     }
    1714            0 :                     .await;
    1715            0 :                 }
    1716            0 :             }
    1717              : 
    1718            0 :             return;
    1719              :         };
    1720              : 
    1721            0 :         let Some(current_size) = open_layer.try_len() else {
    1722              :             // Unexpected: since we hold the write guard, nobody else should be writing to this layer, so
    1723              :             // read lock to get size should always succeed.
    1724            0 :             tracing::warn!("Lock conflict while reading size of open layer");
    1725            0 :             return;
    1726              :         };
    1727              : 
    1728            0 :         let current_lsn = self.get_last_record_lsn();
    1729              : 
    1730            0 :         let checkpoint_distance_override = open_layer.tick().await;
    1731              : 
    1732            0 :         if let Some(size_override) = checkpoint_distance_override {
    1733            0 :             if current_size > size_override {
    1734              :                 // This is not harmful, but it only happens in relatively rare cases where
    1735              :                 // time-based checkpoints are not happening fast enough to keep the amount of
    1736              :                 // ephemeral data within configured limits.  It's a sign of stress on the system.
    1737            0 :                 tracing::info!("Early-rolling open layer at size {current_size} (limit {size_override}) due to dirty data pressure");
    1738            0 :             }
    1739            0 :         }
    1740              : 
    1741            0 :         let checkpoint_distance =
    1742            0 :             checkpoint_distance_override.unwrap_or(self.get_checkpoint_distance());
    1743            0 : 
    1744            0 :         if self.should_roll(
    1745            0 :             current_size,
    1746            0 :             current_size,
    1747            0 :             checkpoint_distance,
    1748            0 :             self.get_last_record_lsn(),
    1749            0 :             self.last_freeze_at.load(),
    1750            0 :             open_layer.get_opened_at(),
    1751            0 :         ) {
    1752            0 :             match open_layer.info() {
    1753            0 :                 InMemoryLayerInfo::Frozen { lsn_start, lsn_end } => {
    1754            0 :                     // We may reach this point if the layer was already frozen by not yet flushed: flushing
    1755            0 :                     // happens asynchronously in the background.
    1756            0 :                     tracing::debug!(
    1757            0 :                         "Not freezing open layer, it's already frozen ({lsn_start}..{lsn_end})"
    1758              :                     );
    1759              :                 }
    1760              :                 InMemoryLayerInfo::Open { .. } => {
    1761              :                     // Upgrade to a write lock and freeze the layer
    1762            0 :                     drop(layers_guard);
    1763            0 :                     let res = self
    1764            0 :                         .freeze_inmem_layer_at(current_lsn, &mut write_guard)
    1765            0 :                         .await;
    1766              : 
    1767            0 :                     if let Err(e) = res {
    1768            0 :                         tracing::info!(
    1769            0 :                             "failed to flush frozen layer after background freeze: {e:#}"
    1770              :                         );
    1771            0 :                     }
    1772              :                 }
    1773              :             }
    1774            0 :         }
    1775            0 :     }
    1776              : 
    1777              :     /// Checks if the internal state of the timeline is consistent with it being able to be offloaded.
    1778              :     ///
    1779              :     /// This is neccessary but not sufficient for offloading of the timeline as it might have
    1780              :     /// child timelines that are not offloaded yet.
    1781            0 :     pub(crate) fn can_offload(&self) -> (bool, &'static str) {
    1782            0 :         if self.remote_client.is_archived() != Some(true) {
    1783            0 :             return (false, "the timeline is not archived");
    1784            0 :         }
    1785            0 :         if !self.remote_client.no_pending_work() {
    1786              :             // if the remote client is still processing some work, we can't offload
    1787            0 :             return (false, "the upload queue is not drained yet");
    1788            0 :         }
    1789            0 : 
    1790            0 :         (true, "ok")
    1791            0 :     }
    1792              : 
    1793              :     /// Outermost timeline compaction operation; downloads needed layers. Returns whether we have pending
    1794              :     /// compaction tasks.
    1795          768 :     pub(crate) async fn compact(
    1796          768 :         self: &Arc<Self>,
    1797          768 :         cancel: &CancellationToken,
    1798          768 :         flags: EnumSet<CompactFlags>,
    1799          768 :         ctx: &RequestContext,
    1800          768 :     ) -> Result<CompactionOutcome, CompactionError> {
    1801          768 :         self.compact_with_options(
    1802          768 :             cancel,
    1803          768 :             CompactOptions {
    1804          768 :                 flags,
    1805          768 :                 compact_key_range: None,
    1806          768 :                 compact_lsn_range: None,
    1807          768 :                 sub_compaction: false,
    1808          768 :                 sub_compaction_max_job_size_mb: None,
    1809          768 :             },
    1810          768 :             ctx,
    1811          768 :         )
    1812          768 :         .await
    1813          768 :     }
    1814              : 
    1815              :     /// Outermost timeline compaction operation; downloads needed layers.
    1816              :     ///
    1817              :     /// NB: the cancellation token is usually from a background task, but can also come from a
    1818              :     /// request task.
    1819          768 :     pub(crate) async fn compact_with_options(
    1820          768 :         self: &Arc<Self>,
    1821          768 :         cancel: &CancellationToken,
    1822          768 :         options: CompactOptions,
    1823          768 :         ctx: &RequestContext,
    1824          768 :     ) -> Result<CompactionOutcome, CompactionError> {
    1825          768 :         // Acquire the compaction lock and task semaphore.
    1826          768 :         //
    1827          768 :         // L0-only compaction uses a separate semaphore (if enabled) to make sure it isn't starved
    1828          768 :         // out by other background tasks (including image compaction). We request this via
    1829          768 :         // `BackgroundLoopKind::L0Compaction`.
    1830          768 :         //
    1831          768 :         // If this is a regular compaction pass, and L0-only compaction is enabled in the config,
    1832          768 :         // then we should yield for immediate L0 compaction if necessary while we're waiting for the
    1833          768 :         // background task semaphore. There's no point yielding otherwise, since we'd just end up
    1834          768 :         // right back here.
    1835          768 :         let is_l0_only = options.flags.contains(CompactFlags::OnlyL0Compaction);
    1836          768 :         let semaphore_kind = match is_l0_only && self.get_compaction_l0_semaphore() {
    1837            0 :             true => BackgroundLoopKind::L0Compaction,
    1838          768 :             false => BackgroundLoopKind::Compaction,
    1839              :         };
    1840          768 :         let yield_for_l0 = !is_l0_only
    1841          768 :             && self.get_compaction_l0_first()
    1842            0 :             && !options.flags.contains(CompactFlags::NoYield);
    1843              : 
    1844          768 :         let acquire = async move {
    1845          768 :             let guard = self.compaction_lock.lock().await;
    1846          768 :             let permit = super::tasks::acquire_concurrency_permit(semaphore_kind, ctx).await;
    1847          768 :             (guard, permit)
    1848          768 :         };
    1849              : 
    1850          768 :         let (_guard, _permit) = tokio::select! {
    1851          768 :             (guard, permit) = acquire => (guard, permit),
    1852          768 :             _ = self.l0_compaction_trigger.notified(), if yield_for_l0 => {
    1853            0 :                 return Ok(CompactionOutcome::YieldForL0);
    1854              :             }
    1855          768 :             _ = self.cancel.cancelled() => return Ok(CompactionOutcome::Skipped),
    1856          768 :             _ = cancel.cancelled() => return Ok(CompactionOutcome::Skipped),
    1857              :         };
    1858              : 
    1859          768 :         let last_record_lsn = self.get_last_record_lsn();
    1860          768 : 
    1861          768 :         // Last record Lsn could be zero in case the timeline was just created
    1862          768 :         if !last_record_lsn.is_valid() {
    1863            0 :             warn!("Skipping compaction for potentially just initialized timeline, it has invalid last record lsn: {last_record_lsn}");
    1864            0 :             return Ok(CompactionOutcome::Skipped);
    1865          768 :         }
    1866              : 
    1867          768 :         let result = match self.get_compaction_algorithm_settings().kind {
    1868              :             CompactionAlgorithm::Tiered => {
    1869            0 :                 self.compact_tiered(cancel, ctx).await?;
    1870            0 :                 Ok(CompactionOutcome::Done)
    1871              :             }
    1872          768 :             CompactionAlgorithm::Legacy => self.compact_legacy(cancel, options, ctx).await,
    1873              :         };
    1874              : 
    1875              :         // Signal compaction failure to avoid L0 flush stalls when it's broken.
    1876            0 :         match result {
    1877          768 :             Ok(_) => self.compaction_failed.store(false, AtomicOrdering::Relaxed),
    1878              :             Err(CompactionError::Other(_)) => {
    1879            0 :                 self.compaction_failed.store(true, AtomicOrdering::Relaxed)
    1880              :             }
    1881              :             // Don't change the current value on offload failure or shutdown. We don't want to
    1882              :             // abruptly stall nor resume L0 flushes in these cases.
    1883            0 :             Err(CompactionError::Offload(_)) => {}
    1884            0 :             Err(CompactionError::ShuttingDown) => {}
    1885              :         };
    1886              : 
    1887          768 :         result
    1888          768 :     }
    1889              : 
    1890              :     /// Mutate the timeline with a [`TimelineWriter`].
    1891     10266385 :     pub(crate) async fn writer(&self) -> TimelineWriter<'_> {
    1892     10266385 :         TimelineWriter {
    1893     10266385 :             tl: self,
    1894     10266385 :             write_guard: self.write_lock.lock().await,
    1895              :         }
    1896     10266385 :     }
    1897              : 
    1898            0 :     pub(crate) fn activate(
    1899            0 :         self: &Arc<Self>,
    1900            0 :         parent: Arc<crate::tenant::Tenant>,
    1901            0 :         broker_client: BrokerClientChannel,
    1902            0 :         background_jobs_can_start: Option<&completion::Barrier>,
    1903            0 :         ctx: &RequestContext,
    1904            0 :     ) {
    1905            0 :         if self.tenant_shard_id.is_shard_zero() {
    1906            0 :             // Logical size is only maintained accurately on shard zero.
    1907            0 :             self.spawn_initial_logical_size_computation_task(ctx);
    1908            0 :         }
    1909            0 :         self.launch_wal_receiver(ctx, broker_client);
    1910            0 :         self.set_state(TimelineState::Active);
    1911            0 :         self.launch_eviction_task(parent, background_jobs_can_start);
    1912            0 :     }
    1913              : 
    1914              :     /// After this function returns, there are no timeline-scoped tasks are left running.
    1915              :     ///
    1916              :     /// The preferred pattern for is:
    1917              :     /// - in any spawned tasks, keep Timeline::guard open + Timeline::cancel / child token
    1918              :     /// - if early shutdown (not just cancellation) of a sub-tree of tasks is required,
    1919              :     ///   go the extra mile and keep track of JoinHandles
    1920              :     /// - Keep track of JoinHandles using a passed-down `Arc<Mutex<Option<JoinSet>>>` or similar,
    1921              :     ///   instead of spawning directly on a runtime. It is a more composable / testable pattern.
    1922              :     ///
    1923              :     /// For legacy reasons, we still have multiple tasks spawned using
    1924              :     /// `task_mgr::spawn(X, Some(tenant_id), Some(timeline_id))`.
    1925              :     /// We refer to these as "timeline-scoped task_mgr tasks".
    1926              :     /// Some of these tasks are already sensitive to Timeline::cancel while others are
    1927              :     /// not sensitive to Timeline::cancel and instead respect [`task_mgr::shutdown_token`]
    1928              :     /// or [`task_mgr::shutdown_watcher`].
    1929              :     /// We want to gradually convert the code base away from these.
    1930              :     ///
    1931              :     /// Here is an inventory of timeline-scoped task_mgr tasks that are still sensitive to
    1932              :     /// `task_mgr::shutdown_{token,watcher}` (there are also tenant-scoped and global-scoped
    1933              :     /// ones that aren't mentioned here):
    1934              :     /// - [`TaskKind::TimelineDeletionWorker`]
    1935              :     ///    - NB: also used for tenant deletion
    1936              :     /// - [`TaskKind::RemoteUploadTask`]`
    1937              :     /// - [`TaskKind::InitialLogicalSizeCalculation`]
    1938              :     /// - [`TaskKind::DownloadAllRemoteLayers`] (can we get rid of it?)
    1939              :     // Inventory of timeline-scoped task_mgr tasks that use spawn but aren't sensitive:
    1940              :     /// - [`TaskKind::Eviction`]
    1941              :     /// - [`TaskKind::LayerFlushTask`]
    1942              :     /// - [`TaskKind::OndemandLogicalSizeCalculation`]
    1943              :     /// - [`TaskKind::GarbageCollector`] (immediate_gc is timeline-scoped)
    1944           20 :     pub(crate) async fn shutdown(&self, mode: ShutdownMode) {
    1945           20 :         debug_assert_current_span_has_tenant_and_timeline_id();
    1946           20 : 
    1947           20 :         // Regardless of whether we're going to try_freeze_and_flush
    1948           20 :         // or not, stop ingesting any more data. Walreceiver only provides
    1949           20 :         // cancellation but no "wait until gone", because it uses the Timeline::gate.
    1950           20 :         // So, only after the self.gate.close() below will we know for sure that
    1951           20 :         // no walreceiver tasks are left.
    1952           20 :         // For `try_freeze_and_flush=true`, this means that we might still be ingesting
    1953           20 :         // data during the call to `self.freeze_and_flush()` below.
    1954           20 :         // That's not ideal, but, we don't have the concept of a ChildGuard,
    1955           20 :         // which is what we'd need to properly model early shutdown of the walreceiver
    1956           20 :         // task sub-tree before the other Timeline task sub-trees.
    1957           20 :         let walreceiver = self.walreceiver.lock().unwrap().take();
    1958           20 :         tracing::debug!(
    1959            0 :             is_some = walreceiver.is_some(),
    1960            0 :             "Waiting for WalReceiverManager..."
    1961              :         );
    1962           20 :         if let Some(walreceiver) = walreceiver {
    1963            0 :             walreceiver.cancel();
    1964           20 :         }
    1965              :         // ... and inform any waiters for newer LSNs that there won't be any.
    1966           20 :         self.last_record_lsn.shutdown();
    1967           20 : 
    1968           20 :         if let ShutdownMode::FreezeAndFlush = mode {
    1969           12 :             let do_flush = if let Some((open, frozen)) = self
    1970           12 :                 .layers
    1971           12 :                 .read()
    1972           12 :                 .await
    1973           12 :                 .layer_map()
    1974           12 :                 .map(|lm| (lm.open_layer.is_some(), lm.frozen_layers.len()))
    1975           12 :                 .ok()
    1976           12 :                 .filter(|(open, frozen)| *open || *frozen > 0)
    1977              :             {
    1978            0 :                 if self.remote_client.is_archived() == Some(true) {
    1979              :                     // No point flushing on shutdown for an archived timeline: it is not important
    1980              :                     // to have it nice and fresh after our restart, and trying to flush here might
    1981              :                     // race with trying to offload it (which also stops the flush loop)
    1982            0 :                     false
    1983              :                 } else {
    1984            0 :                     tracing::info!(?open, frozen, "flushing and freezing on shutdown");
    1985            0 :                     true
    1986              :                 }
    1987              :             } else {
    1988              :                 // this is double-shutdown, it'll be a no-op
    1989           12 :                 true
    1990              :             };
    1991              : 
    1992              :             // we shut down walreceiver above, so, we won't add anything more
    1993              :             // to the InMemoryLayer; freeze it and wait for all frozen layers
    1994              :             // to reach the disk & upload queue, then shut the upload queue and
    1995              :             // wait for it to drain.
    1996           12 :             if do_flush {
    1997           12 :                 match self.freeze_and_flush().await {
    1998              :                     Ok(_) => {
    1999              :                         // drain the upload queue
    2000              :                         // if we did not wait for completion here, it might be our shutdown process
    2001              :                         // didn't wait for remote uploads to complete at all, as new tasks can forever
    2002              :                         // be spawned.
    2003              :                         //
    2004              :                         // what is problematic is the shutting down of RemoteTimelineClient, because
    2005              :                         // obviously it does not make sense to stop while we wait for it, but what
    2006              :                         // about corner cases like s3 suddenly hanging up?
    2007           12 :                         self.remote_client.shutdown().await;
    2008              :                     }
    2009              :                     Err(FlushLayerError::Cancelled) => {
    2010              :                         // this is likely the second shutdown, ignore silently.
    2011              :                         // TODO: this can be removed once https://github.com/neondatabase/neon/issues/5080
    2012            0 :                         debug_assert!(self.cancel.is_cancelled());
    2013              :                     }
    2014            0 :                     Err(e) => {
    2015            0 :                         // Non-fatal.  Shutdown is infallible.  Failures to flush just mean that
    2016            0 :                         // we have some extra WAL replay to do next time the timeline starts.
    2017            0 :                         warn!("failed to freeze and flush: {e:#}");
    2018              :                     }
    2019              :                 }
    2020              : 
    2021              :                 // `self.remote_client.shutdown().await` above should have already flushed everything from the queue, but
    2022              :                 // we also do a final check here to ensure that the queue is empty.
    2023           12 :                 if !self.remote_client.no_pending_work() {
    2024            0 :                     warn!("still have pending work in remote upload queue, but continuing shutting down anyways");
    2025           12 :                 }
    2026            0 :             }
    2027            8 :         }
    2028              : 
    2029           20 :         if let ShutdownMode::Reload = mode {
    2030              :             // drain the upload queue
    2031            4 :             self.remote_client.shutdown().await;
    2032            4 :             if !self.remote_client.no_pending_work() {
    2033            0 :                 warn!("still have pending work in remote upload queue, but continuing shutting down anyways");
    2034            4 :             }
    2035           16 :         }
    2036              : 
    2037              :         // Signal any subscribers to our cancellation token to drop out
    2038           20 :         tracing::debug!("Cancelling CancellationToken");
    2039           20 :         self.cancel.cancel();
    2040           20 : 
    2041           20 :         // Ensure Prevent new page service requests from starting.
    2042           20 :         self.handles.shutdown();
    2043           20 : 
    2044           20 :         // Transition the remote_client into a state where it's only useful for timeline deletion.
    2045           20 :         // (The deletion use case is why we can't just hook up remote_client to Self::cancel).)
    2046           20 :         self.remote_client.stop();
    2047           20 : 
    2048           20 :         // As documented in remote_client.stop()'s doc comment, it's our responsibility
    2049           20 :         // to shut down the upload queue tasks.
    2050           20 :         // TODO: fix that, task management should be encapsulated inside remote_client.
    2051           20 :         task_mgr::shutdown_tasks(
    2052           20 :             Some(TaskKind::RemoteUploadTask),
    2053           20 :             Some(self.tenant_shard_id),
    2054           20 :             Some(self.timeline_id),
    2055           20 :         )
    2056           20 :         .await;
    2057              : 
    2058              :         // TODO: work toward making this a no-op. See this function's doc comment for more context.
    2059           20 :         tracing::debug!("Waiting for tasks...");
    2060           20 :         task_mgr::shutdown_tasks(None, Some(self.tenant_shard_id), Some(self.timeline_id)).await;
    2061              : 
    2062              :         {
    2063              :             // Allow any remaining in-memory layers to do cleanup -- until that, they hold the gate
    2064              :             // open.
    2065           20 :             let mut write_guard = self.write_lock.lock().await;
    2066           20 :             self.layers.write().await.shutdown(&mut write_guard);
    2067           20 :         }
    2068           20 : 
    2069           20 :         // Finally wait until any gate-holders are complete.
    2070           20 :         //
    2071           20 :         // TODO: once above shutdown_tasks is a no-op, we can close the gate before calling shutdown_tasks
    2072           20 :         // and use a TBD variant of shutdown_tasks that asserts that there were no tasks left.
    2073           20 :         self.gate.close().await;
    2074              : 
    2075           20 :         self.metrics.shutdown();
    2076           20 :     }
    2077              : 
    2078          901 :     pub(crate) fn set_state(&self, new_state: TimelineState) {
    2079          901 :         match (self.current_state(), new_state) {
    2080          901 :             (equal_state_1, equal_state_2) if equal_state_1 == equal_state_2 => {
    2081            4 :                 info!("Ignoring new state, equal to the existing one: {equal_state_2:?}");
    2082              :             }
    2083            0 :             (st, TimelineState::Loading) => {
    2084            0 :                 error!("ignoring transition from {st:?} into Loading state");
    2085              :             }
    2086            0 :             (TimelineState::Broken { .. }, new_state) => {
    2087            0 :                 error!("Ignoring state update {new_state:?} for broken timeline");
    2088              :             }
    2089              :             (TimelineState::Stopping, TimelineState::Active) => {
    2090            0 :                 error!("Not activating a Stopping timeline");
    2091              :             }
    2092          897 :             (_, new_state) => {
    2093          897 :                 self.state.send_replace(new_state);
    2094          897 :             }
    2095              :         }
    2096          901 :     }
    2097              : 
    2098            4 :     pub(crate) fn set_broken(&self, reason: String) {
    2099            4 :         let backtrace_str: String = format!("{}", std::backtrace::Backtrace::force_capture());
    2100            4 :         let broken_state = TimelineState::Broken {
    2101            4 :             reason,
    2102            4 :             backtrace: backtrace_str,
    2103            4 :         };
    2104            4 :         self.set_state(broken_state);
    2105            4 : 
    2106            4 :         // Although the Broken state is not equivalent to shutdown() (shutdown will be called
    2107            4 :         // later when this tenant is detach or the process shuts down), firing the cancellation token
    2108            4 :         // here avoids the need for other tasks to watch for the Broken state explicitly.
    2109            4 :         self.cancel.cancel();
    2110            4 :     }
    2111              : 
    2112       453824 :     pub(crate) fn current_state(&self) -> TimelineState {
    2113       453824 :         self.state.borrow().clone()
    2114       453824 :     }
    2115              : 
    2116           12 :     pub(crate) fn is_broken(&self) -> bool {
    2117           12 :         matches!(&*self.state.borrow(), TimelineState::Broken { .. })
    2118           12 :     }
    2119              : 
    2120          475 :     pub(crate) fn is_active(&self) -> bool {
    2121          475 :         self.current_state() == TimelineState::Active
    2122          475 :     }
    2123              : 
    2124            0 :     pub(crate) fn is_archived(&self) -> Option<bool> {
    2125            0 :         self.remote_client.is_archived()
    2126            0 :     }
    2127              : 
    2128          776 :     pub(crate) fn is_stopping(&self) -> bool {
    2129          776 :         self.current_state() == TimelineState::Stopping
    2130          776 :     }
    2131              : 
    2132            0 :     pub(crate) fn subscribe_for_state_updates(&self) -> watch::Receiver<TimelineState> {
    2133            0 :         self.state.subscribe()
    2134            0 :     }
    2135              : 
    2136       451676 :     pub(crate) async fn wait_to_become_active(
    2137       451676 :         &self,
    2138       451676 :         _ctx: &RequestContext, // Prepare for use by cancellation
    2139       451676 :     ) -> Result<(), TimelineState> {
    2140       451676 :         let mut receiver = self.state.subscribe();
    2141              :         loop {
    2142       451676 :             let current_state = receiver.borrow().clone();
    2143       451676 :             match current_state {
    2144              :                 TimelineState::Loading => {
    2145            0 :                     receiver
    2146            0 :                         .changed()
    2147            0 :                         .await
    2148            0 :                         .expect("holding a reference to self");
    2149              :                 }
    2150              :                 TimelineState::Active { .. } => {
    2151       451672 :                     return Ok(());
    2152              :                 }
    2153              :                 TimelineState::Broken { .. } | TimelineState::Stopping => {
    2154              :                     // There's no chance the timeline can transition back into ::Active
    2155            4 :                     return Err(current_state);
    2156              :                 }
    2157              :             }
    2158              :         }
    2159       451676 :     }
    2160              : 
    2161            0 :     pub(crate) async fn layer_map_info(
    2162            0 :         &self,
    2163            0 :         reset: LayerAccessStatsReset,
    2164            0 :     ) -> Result<LayerMapInfo, layer_manager::Shutdown> {
    2165            0 :         let guard = self.layers.read().await;
    2166            0 :         let layer_map = guard.layer_map()?;
    2167            0 :         let mut in_memory_layers = Vec::with_capacity(layer_map.frozen_layers.len() + 1);
    2168            0 :         if let Some(open_layer) = &layer_map.open_layer {
    2169            0 :             in_memory_layers.push(open_layer.info());
    2170            0 :         }
    2171            0 :         for frozen_layer in &layer_map.frozen_layers {
    2172            0 :             in_memory_layers.push(frozen_layer.info());
    2173            0 :         }
    2174              : 
    2175            0 :         let historic_layers = layer_map
    2176            0 :             .iter_historic_layers()
    2177            0 :             .map(|desc| guard.get_from_desc(&desc).info(reset))
    2178            0 :             .collect();
    2179            0 : 
    2180            0 :         Ok(LayerMapInfo {
    2181            0 :             in_memory_layers,
    2182            0 :             historic_layers,
    2183            0 :         })
    2184            0 :     }
    2185              : 
    2186              :     #[instrument(skip_all, fields(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))]
    2187              :     pub(crate) async fn download_layer(
    2188              :         &self,
    2189              :         layer_file_name: &LayerName,
    2190              :     ) -> Result<Option<bool>, super::storage_layer::layer::DownloadError> {
    2191              :         let Some(layer) = self
    2192              :             .find_layer(layer_file_name)
    2193              :             .await
    2194            0 :             .map_err(|e| match e {
    2195            0 :                 layer_manager::Shutdown => {
    2196            0 :                     super::storage_layer::layer::DownloadError::TimelineShutdown
    2197            0 :                 }
    2198            0 :             })?
    2199              :         else {
    2200              :             return Ok(None);
    2201              :         };
    2202              : 
    2203              :         layer.download().await?;
    2204              : 
    2205              :         Ok(Some(true))
    2206              :     }
    2207              : 
    2208              :     /// Evict just one layer.
    2209              :     ///
    2210              :     /// Returns `Ok(None)` in the case where the layer could not be found by its `layer_file_name`.
    2211            0 :     pub(crate) async fn evict_layer(
    2212            0 :         &self,
    2213            0 :         layer_file_name: &LayerName,
    2214            0 :     ) -> anyhow::Result<Option<bool>> {
    2215            0 :         let _gate = self
    2216            0 :             .gate
    2217            0 :             .enter()
    2218            0 :             .map_err(|_| anyhow::anyhow!("Shutting down"))?;
    2219              : 
    2220            0 :         let Some(local_layer) = self.find_layer(layer_file_name).await? else {
    2221            0 :             return Ok(None);
    2222              :         };
    2223              : 
    2224              :         // curl has this by default
    2225            0 :         let timeout = std::time::Duration::from_secs(120);
    2226            0 : 
    2227            0 :         match local_layer.evict_and_wait(timeout).await {
    2228            0 :             Ok(()) => Ok(Some(true)),
    2229            0 :             Err(EvictionError::NotFound) => Ok(Some(false)),
    2230            0 :             Err(EvictionError::Downloaded) => Ok(Some(false)),
    2231            0 :             Err(EvictionError::Timeout) => Ok(Some(false)),
    2232              :         }
    2233            0 :     }
    2234              : 
    2235      9606020 :     fn should_roll(
    2236      9606020 :         &self,
    2237      9606020 :         layer_size: u64,
    2238      9606020 :         projected_layer_size: u64,
    2239      9606020 :         checkpoint_distance: u64,
    2240      9606020 :         projected_lsn: Lsn,
    2241      9606020 :         last_freeze_at: Lsn,
    2242      9606020 :         opened_at: Instant,
    2243      9606020 :     ) -> bool {
    2244      9606020 :         let distance = projected_lsn.widening_sub(last_freeze_at);
    2245      9606020 : 
    2246      9606020 :         // Rolling the open layer can be triggered by:
    2247      9606020 :         // 1. The distance from the last LSN we rolled at. This bounds the amount of WAL that
    2248      9606020 :         //    the safekeepers need to store.  For sharded tenants, we multiply by shard count to
    2249      9606020 :         //    account for how writes are distributed across shards: we expect each node to consume
    2250      9606020 :         //    1/count of the LSN on average.
    2251      9606020 :         // 2. The size of the currently open layer.
    2252      9606020 :         // 3. The time since the last roll. It helps safekeepers to regard pageserver as caught
    2253      9606020 :         //    up and suspend activity.
    2254      9606020 :         if distance >= checkpoint_distance as i128 * self.shard_identity.count.count() as i128 {
    2255            0 :             info!(
    2256            0 :                 "Will roll layer at {} with layer size {} due to LSN distance ({})",
    2257              :                 projected_lsn, layer_size, distance
    2258              :             );
    2259              : 
    2260            0 :             true
    2261      9606020 :         } else if projected_layer_size >= checkpoint_distance {
    2262              :             // NB: this check is relied upon by:
    2263          160 :             let _ = IndexEntry::validate_checkpoint_distance;
    2264          160 :             info!(
    2265            0 :                 "Will roll layer at {} with layer size {} due to layer size ({})",
    2266              :                 projected_lsn, layer_size, projected_layer_size
    2267              :             );
    2268              : 
    2269          160 :             true
    2270      9605860 :         } else if distance > 0 && opened_at.elapsed() >= self.get_checkpoint_timeout() {
    2271            0 :             info!(
    2272            0 :                 "Will roll layer at {} with layer size {} due to time since first write to the layer ({:?})",
    2273            0 :                 projected_lsn,
    2274            0 :                 layer_size,
    2275            0 :                 opened_at.elapsed()
    2276              :             );
    2277              : 
    2278            0 :             true
    2279              :         } else {
    2280      9605860 :             false
    2281              :         }
    2282      9606020 :     }
    2283              : }
    2284              : 
    2285              : /// Number of times we will compute partition within a checkpoint distance.
    2286              : const REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE: u64 = 10;
    2287              : 
    2288              : // Private functions
    2289              : impl Timeline {
    2290           24 :     pub(crate) fn get_lsn_lease_length(&self) -> Duration {
    2291           24 :         let tenant_conf = self.tenant_conf.load();
    2292           24 :         tenant_conf
    2293           24 :             .tenant_conf
    2294           24 :             .lsn_lease_length
    2295           24 :             .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length)
    2296           24 :     }
    2297              : 
    2298            0 :     pub(crate) fn get_lsn_lease_length_for_ts(&self) -> Duration {
    2299            0 :         let tenant_conf = self.tenant_conf.load();
    2300            0 :         tenant_conf
    2301            0 :             .tenant_conf
    2302            0 :             .lsn_lease_length_for_ts
    2303            0 :             .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length_for_ts)
    2304            0 :     }
    2305              : 
    2306            0 :     pub(crate) fn is_gc_blocked_by_lsn_lease_deadline(&self) -> bool {
    2307            0 :         let tenant_conf = self.tenant_conf.load();
    2308            0 :         tenant_conf.is_gc_blocked_by_lsn_lease_deadline()
    2309            0 :     }
    2310              : 
    2311            0 :     pub(crate) fn get_lazy_slru_download(&self) -> bool {
    2312            0 :         let tenant_conf = self.tenant_conf.load();
    2313            0 :         tenant_conf
    2314            0 :             .tenant_conf
    2315            0 :             .lazy_slru_download
    2316            0 :             .unwrap_or(self.conf.default_tenant_conf.lazy_slru_download)
    2317            0 :     }
    2318              : 
    2319      9609118 :     fn get_checkpoint_distance(&self) -> u64 {
    2320      9609118 :         let tenant_conf = self.tenant_conf.load();
    2321      9609118 :         tenant_conf
    2322      9609118 :             .tenant_conf
    2323      9609118 :             .checkpoint_distance
    2324      9609118 :             .unwrap_or(self.conf.default_tenant_conf.checkpoint_distance)
    2325      9609118 :     }
    2326              : 
    2327      9605860 :     fn get_checkpoint_timeout(&self) -> Duration {
    2328      9605860 :         let tenant_conf = self.tenant_conf.load();
    2329      9605860 :         tenant_conf
    2330      9605860 :             .tenant_conf
    2331      9605860 :             .checkpoint_timeout
    2332      9605860 :             .unwrap_or(self.conf.default_tenant_conf.checkpoint_timeout)
    2333      9605860 :     }
    2334              : 
    2335         5018 :     fn get_compaction_period(&self) -> Duration {
    2336         5018 :         let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
    2337         5018 :         tenant_conf
    2338         5018 :             .compaction_period
    2339         5018 :             .unwrap_or(self.conf.default_tenant_conf.compaction_period)
    2340         5018 :     }
    2341              : 
    2342         1369 :     fn get_compaction_target_size(&self) -> u64 {
    2343         1369 :         let tenant_conf = self.tenant_conf.load();
    2344         1369 :         tenant_conf
    2345         1369 :             .tenant_conf
    2346         1369 :             .compaction_target_size
    2347         1369 :             .unwrap_or(self.conf.default_tenant_conf.compaction_target_size)
    2348         1369 :     }
    2349              : 
    2350         3209 :     fn get_compaction_threshold(&self) -> usize {
    2351         3209 :         let tenant_conf = self.tenant_conf.load();
    2352         3209 :         tenant_conf
    2353         3209 :             .tenant_conf
    2354         3209 :             .compaction_threshold
    2355         3209 :             .unwrap_or(self.conf.default_tenant_conf.compaction_threshold)
    2356         3209 :     }
    2357              : 
    2358           92 :     fn get_compaction_upper_limit(&self) -> usize {
    2359           92 :         let tenant_conf = self.tenant_conf.load();
    2360           92 :         tenant_conf
    2361           92 :             .tenant_conf
    2362           92 :             .compaction_upper_limit
    2363           92 :             .unwrap_or(self.conf.default_tenant_conf.compaction_upper_limit)
    2364           92 :     }
    2365              : 
    2366          768 :     pub fn get_compaction_l0_first(&self) -> bool {
    2367          768 :         let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
    2368          768 :         tenant_conf
    2369          768 :             .compaction_l0_first
    2370          768 :             .unwrap_or(self.conf.default_tenant_conf.compaction_l0_first)
    2371          768 :     }
    2372              : 
    2373            0 :     pub fn get_compaction_l0_semaphore(&self) -> bool {
    2374            0 :         let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
    2375            0 :         tenant_conf
    2376            0 :             .compaction_l0_semaphore
    2377            0 :             .unwrap_or(self.conf.default_tenant_conf.compaction_l0_semaphore)
    2378            0 :     }
    2379              : 
    2380         2509 :     fn get_l0_flush_delay_threshold(&self) -> Option<usize> {
    2381              :         // Disable L0 flushes by default. This and compaction needs further tuning.
    2382              :         const DEFAULT_L0_FLUSH_DELAY_FACTOR: usize = 0; // TODO: default to e.g. 3
    2383              : 
    2384              :         // If compaction is disabled, don't delay.
    2385         2509 :         if self.get_compaction_period() == Duration::ZERO {
    2386         2509 :             return None;
    2387            0 :         }
    2388            0 : 
    2389            0 :         let compaction_threshold = self.get_compaction_threshold();
    2390            0 :         let tenant_conf = self.tenant_conf.load();
    2391            0 :         let l0_flush_delay_threshold = tenant_conf
    2392            0 :             .tenant_conf
    2393            0 :             .l0_flush_delay_threshold
    2394            0 :             .or(self.conf.default_tenant_conf.l0_flush_delay_threshold)
    2395            0 :             .unwrap_or(DEFAULT_L0_FLUSH_DELAY_FACTOR * compaction_threshold);
    2396            0 : 
    2397            0 :         // 0 disables backpressure.
    2398            0 :         if l0_flush_delay_threshold == 0 {
    2399            0 :             return None;
    2400            0 :         }
    2401            0 : 
    2402            0 :         // Clamp the flush delay threshold to the compaction threshold; it doesn't make sense to
    2403            0 :         // backpressure flushes below this.
    2404            0 :         // TODO: the tenant config should have validation to prevent this instead.
    2405            0 :         debug_assert!(l0_flush_delay_threshold >= compaction_threshold);
    2406            0 :         Some(max(l0_flush_delay_threshold, compaction_threshold))
    2407         2509 :     }
    2408              : 
    2409         2509 :     fn get_l0_flush_stall_threshold(&self) -> Option<usize> {
    2410              :         // Disable L0 stalls by default. In ingest benchmarks, we see image compaction take >10
    2411              :         // minutes, blocking L0 compaction, and we can't stall L0 flushes for that long.
    2412              :         const DEFAULT_L0_FLUSH_STALL_FACTOR: usize = 0; // TODO: default to e.g. 5
    2413              : 
    2414              :         // If compaction is disabled, don't stall.
    2415         2509 :         if self.get_compaction_period() == Duration::ZERO {
    2416         2509 :             return None;
    2417            0 :         }
    2418            0 : 
    2419            0 :         // If compaction is failing, don't stall and try to keep the tenant alive. This may not be a
    2420            0 :         // good idea: read amp can grow unbounded, leading to terrible performance, and we may take
    2421            0 :         // on unbounded compaction debt that can take a long time to fix once compaction comes back
    2422            0 :         // online. At least we'll delay flushes, slowing down the growth and buying some time.
    2423            0 :         if self.compaction_failed.load(AtomicOrdering::Relaxed) {
    2424            0 :             return None;
    2425            0 :         }
    2426            0 : 
    2427            0 :         let compaction_threshold = self.get_compaction_threshold();
    2428            0 :         let tenant_conf = self.tenant_conf.load();
    2429            0 :         let l0_flush_stall_threshold = tenant_conf
    2430            0 :             .tenant_conf
    2431            0 :             .l0_flush_stall_threshold
    2432            0 :             .or(self.conf.default_tenant_conf.l0_flush_stall_threshold);
    2433            0 : 
    2434            0 :         // Tests sometimes set compaction_threshold=1 to generate lots of layer files, and don't
    2435            0 :         // handle the 20-second compaction delay. Some (e.g. `test_backward_compatibility`) can't
    2436            0 :         // easily adjust the L0 backpressure settings, so just disable stalls in this case.
    2437            0 :         if cfg!(feature = "testing")
    2438            0 :             && compaction_threshold == 1
    2439            0 :             && l0_flush_stall_threshold.is_none()
    2440              :         {
    2441            0 :             return None;
    2442            0 :         }
    2443            0 : 
    2444            0 :         let l0_flush_stall_threshold = l0_flush_stall_threshold
    2445            0 :             .unwrap_or(DEFAULT_L0_FLUSH_STALL_FACTOR * compaction_threshold);
    2446            0 : 
    2447            0 :         // 0 disables backpressure.
    2448            0 :         if l0_flush_stall_threshold == 0 {
    2449            0 :             return None;
    2450            0 :         }
    2451            0 : 
    2452            0 :         // Clamp the flush stall threshold to the compaction threshold; it doesn't make sense to
    2453            0 :         // backpressure flushes below this.
    2454            0 :         // TODO: the tenant config should have validation to prevent this instead.
    2455            0 :         debug_assert!(l0_flush_stall_threshold >= compaction_threshold);
    2456            0 :         Some(max(l0_flush_stall_threshold, compaction_threshold))
    2457         2509 :     }
    2458              : 
    2459         2349 :     fn get_l0_flush_wait_upload(&self) -> bool {
    2460         2349 :         let tenant_conf = self.tenant_conf.load();
    2461         2349 :         tenant_conf
    2462         2349 :             .tenant_conf
    2463         2349 :             .l0_flush_wait_upload
    2464         2349 :             .unwrap_or(self.conf.default_tenant_conf.l0_flush_wait_upload)
    2465         2349 :     }
    2466              : 
    2467            0 :     fn get_image_creation_threshold(&self) -> usize {
    2468            0 :         let tenant_conf = self.tenant_conf.load();
    2469            0 :         tenant_conf
    2470            0 :             .tenant_conf
    2471            0 :             .image_creation_threshold
    2472            0 :             .unwrap_or(self.conf.default_tenant_conf.image_creation_threshold)
    2473            0 :     }
    2474              : 
    2475          768 :     fn get_compaction_algorithm_settings(&self) -> CompactionAlgorithmSettings {
    2476          768 :         let tenant_conf = &self.tenant_conf.load();
    2477          768 :         tenant_conf
    2478          768 :             .tenant_conf
    2479          768 :             .compaction_algorithm
    2480          768 :             .as_ref()
    2481          768 :             .unwrap_or(&self.conf.default_tenant_conf.compaction_algorithm)
    2482          768 :             .clone()
    2483          768 :     }
    2484              : 
    2485            0 :     fn get_eviction_policy(&self) -> EvictionPolicy {
    2486            0 :         let tenant_conf = self.tenant_conf.load();
    2487            0 :         tenant_conf
    2488            0 :             .tenant_conf
    2489            0 :             .eviction_policy
    2490            0 :             .unwrap_or(self.conf.default_tenant_conf.eviction_policy)
    2491            0 :     }
    2492              : 
    2493          897 :     fn get_evictions_low_residence_duration_metric_threshold(
    2494          897 :         tenant_conf: &TenantConfOpt,
    2495          897 :         default_tenant_conf: &TenantConf,
    2496          897 :     ) -> Duration {
    2497          897 :         tenant_conf
    2498          897 :             .evictions_low_residence_duration_metric_threshold
    2499          897 :             .unwrap_or(default_tenant_conf.evictions_low_residence_duration_metric_threshold)
    2500          897 :     }
    2501              : 
    2502         1181 :     fn get_image_layer_creation_check_threshold(&self) -> u8 {
    2503         1181 :         let tenant_conf = self.tenant_conf.load();
    2504         1181 :         tenant_conf
    2505         1181 :             .tenant_conf
    2506         1181 :             .image_layer_creation_check_threshold
    2507         1181 :             .unwrap_or(
    2508         1181 :                 self.conf
    2509         1181 :                     .default_tenant_conf
    2510         1181 :                     .image_layer_creation_check_threshold,
    2511         1181 :             )
    2512         1181 :     }
    2513              : 
    2514            0 :     fn get_image_creation_preempt_threshold(&self) -> usize {
    2515            0 :         let tenant_conf = self.tenant_conf.load();
    2516            0 :         tenant_conf
    2517            0 :             .tenant_conf
    2518            0 :             .image_creation_preempt_threshold
    2519            0 :             .unwrap_or(
    2520            0 :                 self.conf
    2521            0 :                     .default_tenant_conf
    2522            0 :                     .image_creation_preempt_threshold,
    2523            0 :             )
    2524            0 :     }
    2525              : 
    2526              :     /// Resolve the effective WAL receiver protocol to use for this tenant.
    2527              :     ///
    2528              :     /// Priority order is:
    2529              :     /// 1. Tenant config override
    2530              :     /// 2. Default value for tenant config override
    2531              :     /// 3. Pageserver config override
    2532              :     /// 4. Pageserver config default
    2533            0 :     pub fn resolve_wal_receiver_protocol(&self) -> PostgresClientProtocol {
    2534            0 :         let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
    2535            0 :         tenant_conf
    2536            0 :             .wal_receiver_protocol_override
    2537            0 :             .or(self.conf.default_tenant_conf.wal_receiver_protocol_override)
    2538            0 :             .unwrap_or(self.conf.wal_receiver_protocol)
    2539            0 :     }
    2540              : 
    2541            0 :     pub(super) fn tenant_conf_updated(&self, new_conf: &AttachedTenantConf) {
    2542            0 :         // NB: Most tenant conf options are read by background loops, so,
    2543            0 :         // changes will automatically be picked up.
    2544            0 : 
    2545            0 :         // The threshold is embedded in the metric. So, we need to update it.
    2546            0 :         {
    2547            0 :             let new_threshold = Self::get_evictions_low_residence_duration_metric_threshold(
    2548            0 :                 &new_conf.tenant_conf,
    2549            0 :                 &self.conf.default_tenant_conf,
    2550            0 :             );
    2551            0 : 
    2552            0 :             let tenant_id_str = self.tenant_shard_id.tenant_id.to_string();
    2553            0 :             let shard_id_str = format!("{}", self.tenant_shard_id.shard_slug());
    2554            0 : 
    2555            0 :             let timeline_id_str = self.timeline_id.to_string();
    2556            0 : 
    2557            0 :             self.remote_client.update_config(&new_conf.location);
    2558            0 : 
    2559            0 :             self.metrics
    2560            0 :                 .evictions_with_low_residence_duration
    2561            0 :                 .write()
    2562            0 :                 .unwrap()
    2563            0 :                 .change_threshold(
    2564            0 :                     &tenant_id_str,
    2565            0 :                     &shard_id_str,
    2566            0 :                     &timeline_id_str,
    2567            0 :                     new_threshold,
    2568            0 :                 );
    2569            0 :         }
    2570            0 :     }
    2571              : 
    2572              :     /// Open a Timeline handle.
    2573              :     ///
    2574              :     /// Loads the metadata for the timeline into memory, but not the layer map.
    2575              :     #[allow(clippy::too_many_arguments)]
    2576          897 :     pub(super) fn new(
    2577          897 :         conf: &'static PageServerConf,
    2578          897 :         tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
    2579          897 :         metadata: &TimelineMetadata,
    2580          897 :         previous_heatmap: Option<PreviousHeatmap>,
    2581          897 :         ancestor: Option<Arc<Timeline>>,
    2582          897 :         timeline_id: TimelineId,
    2583          897 :         tenant_shard_id: TenantShardId,
    2584          897 :         generation: Generation,
    2585          897 :         shard_identity: ShardIdentity,
    2586          897 :         walredo_mgr: Option<Arc<super::WalRedoManager>>,
    2587          897 :         resources: TimelineResources,
    2588          897 :         pg_version: u32,
    2589          897 :         state: TimelineState,
    2590          897 :         attach_wal_lag_cooldown: Arc<OnceLock<WalLagCooldown>>,
    2591          897 :         create_idempotency: crate::tenant::CreateTimelineIdempotency,
    2592          897 :         cancel: CancellationToken,
    2593          897 :     ) -> Arc<Self> {
    2594          897 :         let disk_consistent_lsn = metadata.disk_consistent_lsn();
    2595          897 :         let (state, _) = watch::channel(state);
    2596          897 : 
    2597          897 :         let (layer_flush_start_tx, _) = tokio::sync::watch::channel((0, disk_consistent_lsn));
    2598          897 :         let (layer_flush_done_tx, _) = tokio::sync::watch::channel((0, Ok(())));
    2599          897 : 
    2600          897 :         let evictions_low_residence_duration_metric_threshold = {
    2601          897 :             let loaded_tenant_conf = tenant_conf.load();
    2602          897 :             Self::get_evictions_low_residence_duration_metric_threshold(
    2603          897 :                 &loaded_tenant_conf.tenant_conf,
    2604          897 :                 &conf.default_tenant_conf,
    2605          897 :             )
    2606              :         };
    2607              : 
    2608          897 :         if let Some(ancestor) = &ancestor {
    2609          460 :             let mut ancestor_gc_info = ancestor.gc_info.write().unwrap();
    2610          460 :             // If we construct an explicit timeline object, it's obviously not offloaded
    2611          460 :             let is_offloaded = MaybeOffloaded::No;
    2612          460 :             ancestor_gc_info.insert_child(timeline_id, metadata.ancestor_lsn(), is_offloaded);
    2613          460 :         }
    2614              : 
    2615          897 :         Arc::new_cyclic(|myself| {
    2616          897 :             let metrics = TimelineMetrics::new(
    2617          897 :                 &tenant_shard_id,
    2618          897 :                 &timeline_id,
    2619          897 :                 crate::metrics::EvictionsWithLowResidenceDurationBuilder::new(
    2620          897 :                     "mtime",
    2621          897 :                     evictions_low_residence_duration_metric_threshold,
    2622          897 :                 ),
    2623          897 :             );
    2624          897 :             let aux_file_metrics = metrics.aux_file_size_gauge.clone();
    2625              : 
    2626          897 :             let mut result = Timeline {
    2627          897 :                 conf,
    2628          897 :                 tenant_conf,
    2629          897 :                 myself: myself.clone(),
    2630          897 :                 timeline_id,
    2631          897 :                 tenant_shard_id,
    2632          897 :                 generation,
    2633          897 :                 shard_identity,
    2634          897 :                 pg_version,
    2635          897 :                 layers: Default::default(),
    2636          897 :                 gc_compaction_layer_update_lock: tokio::sync::RwLock::new(()),
    2637          897 : 
    2638          897 :                 walredo_mgr,
    2639          897 :                 walreceiver: Mutex::new(None),
    2640          897 : 
    2641          897 :                 remote_client: Arc::new(resources.remote_client),
    2642          897 : 
    2643          897 :                 // initialize in-memory 'last_record_lsn' from 'disk_consistent_lsn'.
    2644          897 :                 last_record_lsn: SeqWait::new(RecordLsn {
    2645          897 :                     last: disk_consistent_lsn,
    2646          897 :                     prev: metadata.prev_record_lsn().unwrap_or(Lsn(0)),
    2647          897 :                 }),
    2648          897 :                 disk_consistent_lsn: AtomicLsn::new(disk_consistent_lsn.0),
    2649          897 : 
    2650          897 :                 last_freeze_at: AtomicLsn::new(disk_consistent_lsn.0),
    2651          897 :                 last_freeze_ts: RwLock::new(Instant::now()),
    2652          897 : 
    2653          897 :                 loaded_at: (disk_consistent_lsn, SystemTime::now()),
    2654          897 : 
    2655          897 :                 ancestor_timeline: ancestor,
    2656          897 :                 ancestor_lsn: metadata.ancestor_lsn(),
    2657          897 : 
    2658          897 :                 metrics,
    2659          897 : 
    2660          897 :                 query_metrics: crate::metrics::SmgrQueryTimePerTimeline::new(
    2661          897 :                     &tenant_shard_id,
    2662          897 :                     &timeline_id,
    2663          897 :                     resources.pagestream_throttle_metrics,
    2664          897 :                 ),
    2665          897 : 
    2666         6279 :                 directory_metrics: array::from_fn(|_| AtomicU64::new(0)),
    2667          897 : 
    2668          897 :                 flush_loop_state: Mutex::new(FlushLoopState::NotStarted),
    2669          897 : 
    2670          897 :                 layer_flush_start_tx,
    2671          897 :                 layer_flush_done_tx,
    2672          897 : 
    2673          897 :                 write_lock: tokio::sync::Mutex::new(None),
    2674          897 : 
    2675          897 :                 gc_info: std::sync::RwLock::new(GcInfo::default()),
    2676          897 : 
    2677          897 :                 last_image_layer_creation_status: ArcSwap::new(Arc::new(
    2678          897 :                     LastImageLayerCreationStatus::default(),
    2679          897 :                 )),
    2680          897 : 
    2681          897 :                 applied_gc_cutoff_lsn: Rcu::new(metadata.latest_gc_cutoff_lsn()),
    2682          897 :                 initdb_lsn: metadata.initdb_lsn(),
    2683          897 : 
    2684          897 :                 current_logical_size: if disk_consistent_lsn.is_valid() {
    2685              :                     // we're creating timeline data with some layer files existing locally,
    2686              :                     // need to recalculate timeline's logical size based on data in the layers.
    2687          468 :                     LogicalSize::deferred_initial(disk_consistent_lsn)
    2688              :                 } else {
    2689              :                     // we're creating timeline data without any layers existing locally,
    2690              :                     // initial logical size is 0.
    2691          429 :                     LogicalSize::empty_initial()
    2692              :                 },
    2693              : 
    2694          897 :                 partitioning: GuardArcSwap::new((
    2695          897 :                     (KeyPartitioning::new(), KeyPartitioning::new().into_sparse()),
    2696          897 :                     Lsn(0),
    2697          897 :                 )),
    2698          897 :                 repartition_threshold: 0,
    2699          897 :                 last_image_layer_creation_check_at: AtomicLsn::new(0),
    2700          897 :                 last_image_layer_creation_check_instant: Mutex::new(None),
    2701          897 : 
    2702          897 :                 last_received_wal: Mutex::new(None),
    2703          897 :                 rel_size_cache: RwLock::new(RelSizeCache {
    2704          897 :                     complete_as_of: disk_consistent_lsn,
    2705          897 :                     map: HashMap::new(),
    2706          897 :                 }),
    2707          897 : 
    2708          897 :                 download_all_remote_layers_task_info: RwLock::new(None),
    2709          897 : 
    2710          897 :                 state,
    2711          897 : 
    2712          897 :                 eviction_task_timeline_state: tokio::sync::Mutex::new(
    2713          897 :                     EvictionTaskTimelineState::default(),
    2714          897 :                 ),
    2715          897 :                 delete_progress: TimelineDeleteProgress::default(),
    2716          897 : 
    2717          897 :                 cancel,
    2718          897 :                 gate: Gate::default(),
    2719          897 : 
    2720          897 :                 compaction_lock: tokio::sync::Mutex::default(),
    2721          897 :                 compaction_failed: AtomicBool::default(),
    2722          897 :                 l0_compaction_trigger: resources.l0_compaction_trigger,
    2723          897 :                 gc_lock: tokio::sync::Mutex::default(),
    2724          897 : 
    2725          897 :                 standby_horizon: AtomicLsn::new(0),
    2726          897 : 
    2727          897 :                 pagestream_throttle: resources.pagestream_throttle,
    2728          897 : 
    2729          897 :                 aux_file_size_estimator: AuxFileSizeEstimator::new(aux_file_metrics),
    2730          897 : 
    2731          897 :                 #[cfg(test)]
    2732          897 :                 extra_test_dense_keyspace: ArcSwap::new(Arc::new(KeySpace::default())),
    2733          897 : 
    2734          897 :                 l0_flush_global_state: resources.l0_flush_global_state,
    2735          897 : 
    2736          897 :                 handles: Default::default(),
    2737          897 : 
    2738          897 :                 attach_wal_lag_cooldown,
    2739          897 : 
    2740          897 :                 create_idempotency,
    2741          897 : 
    2742          897 :                 page_trace: Default::default(),
    2743          897 : 
    2744          897 :                 previous_heatmap: ArcSwapOption::from_pointee(previous_heatmap),
    2745          897 :             };
    2746          897 : 
    2747          897 :             result.repartition_threshold =
    2748          897 :                 result.get_checkpoint_distance() / REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE;
    2749          897 : 
    2750          897 :             result
    2751          897 :                 .metrics
    2752          897 :                 .last_record_lsn_gauge
    2753          897 :                 .set(disk_consistent_lsn.0 as i64);
    2754          897 :             result
    2755          897 :         })
    2756          897 :     }
    2757              : 
    2758         1298 :     pub(super) fn maybe_spawn_flush_loop(self: &Arc<Self>) {
    2759         1298 :         let Ok(guard) = self.gate.enter() else {
    2760            0 :             info!("cannot start flush loop when the timeline gate has already been closed");
    2761            0 :             return;
    2762              :         };
    2763         1298 :         let mut flush_loop_state = self.flush_loop_state.lock().unwrap();
    2764         1298 :         match *flush_loop_state {
    2765          885 :             FlushLoopState::NotStarted => (),
    2766              :             FlushLoopState::Running { .. } => {
    2767          413 :                 info!(
    2768            0 :                     "skipping attempt to start flush_loop twice {}/{}",
    2769            0 :                     self.tenant_shard_id, self.timeline_id
    2770              :                 );
    2771          413 :                 return;
    2772              :             }
    2773              :             FlushLoopState::Exited => {
    2774            0 :                 info!(
    2775            0 :                     "ignoring attempt to restart exited flush_loop {}/{}",
    2776            0 :                     self.tenant_shard_id, self.timeline_id
    2777              :                 );
    2778            0 :                 return;
    2779              :             }
    2780              :         }
    2781              : 
    2782          885 :         let layer_flush_start_rx = self.layer_flush_start_tx.subscribe();
    2783          885 :         let self_clone = Arc::clone(self);
    2784          885 : 
    2785          885 :         debug!("spawning flush loop");
    2786          885 :         *flush_loop_state = FlushLoopState::Running {
    2787          885 :             #[cfg(test)]
    2788          885 :             expect_initdb_optimization: false,
    2789          885 :             #[cfg(test)]
    2790          885 :             initdb_optimization_count: 0,
    2791          885 :         };
    2792          885 :         task_mgr::spawn(
    2793          885 :             task_mgr::BACKGROUND_RUNTIME.handle(),
    2794          885 :             task_mgr::TaskKind::LayerFlushTask,
    2795          885 :             self.tenant_shard_id,
    2796          885 :             Some(self.timeline_id),
    2797          885 :             "layer flush task",
    2798          885 :             async move {
    2799          885 :                 let _guard = guard;
    2800          885 :                 let background_ctx = RequestContext::todo_child(TaskKind::LayerFlushTask, DownloadBehavior::Error);
    2801          885 :                 self_clone.flush_loop(layer_flush_start_rx, &background_ctx).await;
    2802           20 :                 let mut flush_loop_state = self_clone.flush_loop_state.lock().unwrap();
    2803           20 :                 assert!(matches!(*flush_loop_state, FlushLoopState::Running{..}));
    2804           20 :                 *flush_loop_state  = FlushLoopState::Exited;
    2805           20 :                 Ok(())
    2806           20 :             }
    2807          885 :             .instrument(info_span!(parent: None, "layer flush task", tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))
    2808              :         );
    2809         1298 :     }
    2810              : 
    2811              :     /// Creates and starts the wal receiver.
    2812              :     ///
    2813              :     /// This function is expected to be called at most once per Timeline's lifecycle
    2814              :     /// when the timeline is activated.
    2815            0 :     fn launch_wal_receiver(
    2816            0 :         self: &Arc<Self>,
    2817            0 :         ctx: &RequestContext,
    2818            0 :         broker_client: BrokerClientChannel,
    2819            0 :     ) {
    2820            0 :         info!(
    2821            0 :             "launching WAL receiver for timeline {} of tenant {}",
    2822            0 :             self.timeline_id, self.tenant_shard_id
    2823              :         );
    2824              : 
    2825            0 :         let tenant_conf = self.tenant_conf.load();
    2826            0 :         let wal_connect_timeout = tenant_conf
    2827            0 :             .tenant_conf
    2828            0 :             .walreceiver_connect_timeout
    2829            0 :             .unwrap_or(self.conf.default_tenant_conf.walreceiver_connect_timeout);
    2830            0 :         let lagging_wal_timeout = tenant_conf
    2831            0 :             .tenant_conf
    2832            0 :             .lagging_wal_timeout
    2833            0 :             .unwrap_or(self.conf.default_tenant_conf.lagging_wal_timeout);
    2834            0 :         let max_lsn_wal_lag = tenant_conf
    2835            0 :             .tenant_conf
    2836            0 :             .max_lsn_wal_lag
    2837            0 :             .unwrap_or(self.conf.default_tenant_conf.max_lsn_wal_lag);
    2838            0 : 
    2839            0 :         let mut guard = self.walreceiver.lock().unwrap();
    2840            0 :         assert!(
    2841            0 :             guard.is_none(),
    2842            0 :             "multiple launches / re-launches of WAL receiver are not supported"
    2843              :         );
    2844            0 :         *guard = Some(WalReceiver::start(
    2845            0 :             Arc::clone(self),
    2846            0 :             WalReceiverConf {
    2847            0 :                 protocol: self.resolve_wal_receiver_protocol(),
    2848            0 :                 wal_connect_timeout,
    2849            0 :                 lagging_wal_timeout,
    2850            0 :                 max_lsn_wal_lag,
    2851            0 :                 auth_token: crate::config::SAFEKEEPER_AUTH_TOKEN.get().cloned(),
    2852            0 :                 availability_zone: self.conf.availability_zone.clone(),
    2853            0 :                 ingest_batch_size: self.conf.ingest_batch_size,
    2854            0 :             },
    2855            0 :             broker_client,
    2856            0 :             ctx,
    2857            0 :         ));
    2858            0 :     }
    2859              : 
    2860              :     /// Initialize with an empty layer map. Used when creating a new timeline.
    2861          885 :     pub(super) fn init_empty_layer_map(&self, start_lsn: Lsn) {
    2862          885 :         let mut layers = self.layers.try_write().expect(
    2863          885 :             "in the context where we call this function, no other task has access to the object",
    2864          885 :         );
    2865          885 :         layers
    2866          885 :             .open_mut()
    2867          885 :             .expect("in this context the LayerManager must still be open")
    2868          885 :             .initialize_empty(Lsn(start_lsn.0));
    2869          885 :     }
    2870              : 
    2871              :     /// Scan the timeline directory, cleanup, populate the layer map, and schedule uploads for local-only
    2872              :     /// files.
    2873           12 :     pub(super) async fn load_layer_map(
    2874           12 :         &self,
    2875           12 :         disk_consistent_lsn: Lsn,
    2876           12 :         index_part: IndexPart,
    2877           12 :     ) -> anyhow::Result<()> {
    2878              :         use init::{Decision::*, Discovered, DismissedLayer};
    2879              :         use LayerName::*;
    2880              : 
    2881           12 :         let mut guard = self.layers.write().await;
    2882              : 
    2883           12 :         let timer = self.metrics.load_layer_map_histo.start_timer();
    2884           12 : 
    2885           12 :         // Scan timeline directory and create ImageLayerName and DeltaFilename
    2886           12 :         // structs representing all files on disk
    2887           12 :         let timeline_path = self
    2888           12 :             .conf
    2889           12 :             .timeline_path(&self.tenant_shard_id, &self.timeline_id);
    2890           12 :         let conf = self.conf;
    2891           12 :         let span = tracing::Span::current();
    2892           12 : 
    2893           12 :         // Copy to move into the task we're about to spawn
    2894           12 :         let this = self.myself.upgrade().expect("&self method holds the arc");
    2895              : 
    2896           12 :         let (loaded_layers, needs_cleanup, total_physical_size) = tokio::task::spawn_blocking({
    2897           12 :             move || {
    2898           12 :                 let _g = span.entered();
    2899           12 :                 let discovered = init::scan_timeline_dir(&timeline_path)?;
    2900           12 :                 let mut discovered_layers = Vec::with_capacity(discovered.len());
    2901           12 :                 let mut unrecognized_files = Vec::new();
    2902           12 : 
    2903           12 :                 let mut path = timeline_path;
    2904              : 
    2905           44 :                 for discovered in discovered {
    2906           32 :                     let (name, kind) = match discovered {
    2907           32 :                         Discovered::Layer(layer_file_name, local_metadata) => {
    2908           32 :                             discovered_layers.push((layer_file_name, local_metadata));
    2909           32 :                             continue;
    2910              :                         }
    2911            0 :                         Discovered::IgnoredBackup(path) => {
    2912            0 :                             std::fs::remove_file(path)
    2913            0 :                                 .or_else(fs_ext::ignore_not_found)
    2914            0 :                                 .fatal_err("Removing .old file");
    2915            0 :                             continue;
    2916              :                         }
    2917            0 :                         Discovered::Unknown(file_name) => {
    2918            0 :                             // we will later error if there are any
    2919            0 :                             unrecognized_files.push(file_name);
    2920            0 :                             continue;
    2921              :                         }
    2922            0 :                         Discovered::Ephemeral(name) => (name, "old ephemeral file"),
    2923            0 :                         Discovered::Temporary(name) => (name, "temporary timeline file"),
    2924            0 :                         Discovered::TemporaryDownload(name) => (name, "temporary download"),
    2925              :                     };
    2926            0 :                     path.push(Utf8Path::new(&name));
    2927            0 :                     init::cleanup(&path, kind)?;
    2928            0 :                     path.pop();
    2929              :                 }
    2930              : 
    2931           12 :                 if !unrecognized_files.is_empty() {
    2932              :                     // assume that if there are any there are many many.
    2933            0 :                     let n = unrecognized_files.len();
    2934            0 :                     let first = &unrecognized_files[..n.min(10)];
    2935            0 :                     anyhow::bail!(
    2936            0 :                         "unrecognized files in timeline dir (total {n}), first 10: {first:?}"
    2937            0 :                     );
    2938           12 :                 }
    2939           12 : 
    2940           12 :                 let decided = init::reconcile(discovered_layers, &index_part, disk_consistent_lsn);
    2941           12 : 
    2942           12 :                 let mut loaded_layers = Vec::new();
    2943           12 :                 let mut needs_cleanup = Vec::new();
    2944           12 :                 let mut total_physical_size = 0;
    2945              : 
    2946           44 :                 for (name, decision) in decided {
    2947           32 :                     let decision = match decision {
    2948           32 :                         Ok(decision) => decision,
    2949            0 :                         Err(DismissedLayer::Future { local }) => {
    2950            0 :                             if let Some(local) = local {
    2951            0 :                                 init::cleanup_future_layer(
    2952            0 :                                     &local.local_path,
    2953            0 :                                     &name,
    2954            0 :                                     disk_consistent_lsn,
    2955            0 :                                 )?;
    2956            0 :                             }
    2957            0 :                             needs_cleanup.push(name);
    2958            0 :                             continue;
    2959              :                         }
    2960            0 :                         Err(DismissedLayer::LocalOnly(local)) => {
    2961            0 :                             init::cleanup_local_only_file(&name, &local)?;
    2962              :                             // this file never existed remotely, we will have to do rework
    2963            0 :                             continue;
    2964              :                         }
    2965            0 :                         Err(DismissedLayer::BadMetadata(local)) => {
    2966            0 :                             init::cleanup_local_file_for_remote(&local)?;
    2967              :                             // this file never existed remotely, we will have to do rework
    2968            0 :                             continue;
    2969              :                         }
    2970              :                     };
    2971              : 
    2972           32 :                     match &name {
    2973           24 :                         Delta(d) => assert!(d.lsn_range.end <= disk_consistent_lsn + 1),
    2974            8 :                         Image(i) => assert!(i.lsn <= disk_consistent_lsn),
    2975              :                     }
    2976              : 
    2977           32 :                     tracing::debug!(layer=%name, ?decision, "applied");
    2978              : 
    2979           32 :                     let layer = match decision {
    2980           32 :                         Resident { local, remote } => {
    2981           32 :                             total_physical_size += local.file_size;
    2982           32 :                             Layer::for_resident(conf, &this, local.local_path, name, remote)
    2983           32 :                                 .drop_eviction_guard()
    2984              :                         }
    2985            0 :                         Evicted(remote) => Layer::for_evicted(conf, &this, name, remote),
    2986              :                     };
    2987              : 
    2988           32 :                     loaded_layers.push(layer);
    2989              :                 }
    2990           12 :                 Ok((loaded_layers, needs_cleanup, total_physical_size))
    2991           12 :             }
    2992           12 :         })
    2993           12 :         .await
    2994           12 :         .map_err(anyhow::Error::new)
    2995           12 :         .and_then(|x| x)?;
    2996              : 
    2997           12 :         let num_layers = loaded_layers.len();
    2998           12 : 
    2999           12 :         guard
    3000           12 :             .open_mut()
    3001           12 :             .expect("layermanager must be open during init")
    3002           12 :             .initialize_local_layers(loaded_layers, disk_consistent_lsn + 1);
    3003           12 : 
    3004           12 :         self.remote_client
    3005           12 :             .schedule_layer_file_deletion(&needs_cleanup)?;
    3006           12 :         self.remote_client
    3007           12 :             .schedule_index_upload_for_file_changes()?;
    3008              :         // This barrier orders above DELETEs before any later operations.
    3009              :         // This is critical because code executing after the barrier might
    3010              :         // create again objects with the same key that we just scheduled for deletion.
    3011              :         // For example, if we just scheduled deletion of an image layer "from the future",
    3012              :         // later compaction might run again and re-create the same image layer.
    3013              :         // "from the future" here means an image layer whose LSN is > IndexPart::disk_consistent_lsn.
    3014              :         // "same" here means same key range and LSN.
    3015              :         //
    3016              :         // Without a barrier between above DELETEs and the re-creation's PUTs,
    3017              :         // the upload queue may execute the PUT first, then the DELETE.
    3018              :         // In our example, we will end up with an IndexPart referencing a non-existent object.
    3019              :         //
    3020              :         // 1. a future image layer is created and uploaded
    3021              :         // 2. ps restart
    3022              :         // 3. the future layer from (1) is deleted during load layer map
    3023              :         // 4. image layer is re-created and uploaded
    3024              :         // 5. deletion queue would like to delete (1) but actually deletes (4)
    3025              :         // 6. delete by name works as expected, but it now deletes the wrong (later) version
    3026              :         //
    3027              :         // See https://github.com/neondatabase/neon/issues/5878
    3028              :         //
    3029              :         // NB: generation numbers naturally protect against this because they disambiguate
    3030              :         //     (1) and (4)
    3031              :         // TODO: this is basically a no-op now, should we remove it?
    3032           12 :         self.remote_client.schedule_barrier()?;
    3033              :         // Tenant::create_timeline will wait for these uploads to happen before returning, or
    3034              :         // on retry.
    3035              : 
    3036              :         // Now that we have the full layer map, we may calculate the visibility of layers within it (a global scan)
    3037           12 :         drop(guard); // drop write lock, update_layer_visibility will take a read lock.
    3038           12 :         self.update_layer_visibility().await?;
    3039              : 
    3040           12 :         info!(
    3041            0 :             "loaded layer map with {} layers at {}, total physical size: {}",
    3042              :             num_layers, disk_consistent_lsn, total_physical_size
    3043              :         );
    3044              : 
    3045           12 :         timer.stop_and_record();
    3046           12 :         Ok(())
    3047           12 :     }
    3048              : 
    3049              :     /// Retrieve current logical size of the timeline.
    3050              :     ///
    3051              :     /// The size could be lagging behind the actual number, in case
    3052              :     /// the initial size calculation has not been run (gets triggered on the first size access).
    3053              :     ///
    3054              :     /// return size and boolean flag that shows if the size is exact
    3055            0 :     pub(crate) fn get_current_logical_size(
    3056            0 :         self: &Arc<Self>,
    3057            0 :         priority: GetLogicalSizePriority,
    3058            0 :         ctx: &RequestContext,
    3059            0 :     ) -> logical_size::CurrentLogicalSize {
    3060            0 :         if !self.tenant_shard_id.is_shard_zero() {
    3061              :             // Logical size is only accurately maintained on shard zero: when called elsewhere, for example
    3062              :             // when HTTP API is serving a GET for timeline zero, return zero
    3063            0 :             return logical_size::CurrentLogicalSize::Approximate(logical_size::Approximate::zero());
    3064            0 :         }
    3065            0 : 
    3066            0 :         let current_size = self.current_logical_size.current_size();
    3067            0 :         debug!("Current size: {current_size:?}");
    3068              : 
    3069            0 :         match (current_size.accuracy(), priority) {
    3070            0 :             (logical_size::Accuracy::Exact, _) => (), // nothing to do
    3071            0 :             (logical_size::Accuracy::Approximate, GetLogicalSizePriority::Background) => {
    3072            0 :                 // background task will eventually deliver an exact value, we're in no rush
    3073            0 :             }
    3074              :             (logical_size::Accuracy::Approximate, GetLogicalSizePriority::User) => {
    3075              :                 // background task is not ready, but user is asking for it now;
    3076              :                 // => make the background task skip the line
    3077              :                 // (The alternative would be to calculate the size here, but,
    3078              :                 //  it can actually take a long time if the user has a lot of rels.
    3079              :                 //  And we'll inevitable need it again; So, let the background task do the work.)
    3080            0 :                 match self
    3081            0 :                     .current_logical_size
    3082            0 :                     .cancel_wait_for_background_loop_concurrency_limit_semaphore
    3083            0 :                     .get()
    3084              :                 {
    3085            0 :                     Some(cancel) => cancel.cancel(),
    3086              :                     None => {
    3087            0 :                         match self.current_state() {
    3088            0 :                             TimelineState::Broken { .. } | TimelineState::Stopping => {
    3089            0 :                                 // Can happen when timeline detail endpoint is used when deletion is ongoing (or its broken).
    3090            0 :                                 // Don't make noise.
    3091            0 :                             }
    3092              :                             TimelineState::Loading => {
    3093              :                                 // Import does not return an activated timeline.
    3094            0 :                                 info!("discarding priority boost for logical size calculation because timeline is not yet active");
    3095              :                             }
    3096              :                             TimelineState::Active => {
    3097              :                                 // activation should be setting the once cell
    3098            0 :                                 warn!("unexpected: cancel_wait_for_background_loop_concurrency_limit_semaphore not set, priority-boosting of logical size calculation will not work");
    3099            0 :                                 debug_assert!(false);
    3100              :                             }
    3101              :                         }
    3102              :                     }
    3103              :                 }
    3104              :             }
    3105              :         }
    3106              : 
    3107            0 :         if let CurrentLogicalSize::Approximate(_) = &current_size {
    3108            0 :             if ctx.task_kind() == TaskKind::WalReceiverConnectionHandler {
    3109            0 :                 let first = self
    3110            0 :                     .current_logical_size
    3111            0 :                     .did_return_approximate_to_walreceiver
    3112            0 :                     .compare_exchange(
    3113            0 :                         false,
    3114            0 :                         true,
    3115            0 :                         AtomicOrdering::Relaxed,
    3116            0 :                         AtomicOrdering::Relaxed,
    3117            0 :                     )
    3118            0 :                     .is_ok();
    3119            0 :                 if first {
    3120            0 :                     crate::metrics::initial_logical_size::TIMELINES_WHERE_WALRECEIVER_GOT_APPROXIMATE_SIZE.inc();
    3121            0 :                 }
    3122            0 :             }
    3123            0 :         }
    3124              : 
    3125            0 :         current_size
    3126            0 :     }
    3127              : 
    3128            0 :     fn spawn_initial_logical_size_computation_task(self: &Arc<Self>, ctx: &RequestContext) {
    3129            0 :         let Some(initial_part_end) = self.current_logical_size.initial_part_end else {
    3130              :             // nothing to do for freshly created timelines;
    3131            0 :             assert_eq!(
    3132            0 :                 self.current_logical_size.current_size().accuracy(),
    3133            0 :                 logical_size::Accuracy::Exact,
    3134            0 :             );
    3135            0 :             self.current_logical_size.initialized.add_permits(1);
    3136            0 :             return;
    3137              :         };
    3138              : 
    3139            0 :         let cancel_wait_for_background_loop_concurrency_limit_semaphore = CancellationToken::new();
    3140            0 :         let token = cancel_wait_for_background_loop_concurrency_limit_semaphore.clone();
    3141            0 :         self.current_logical_size
    3142            0 :             .cancel_wait_for_background_loop_concurrency_limit_semaphore.set(token)
    3143            0 :             .expect("initial logical size calculation task must be spawned exactly once per Timeline object");
    3144            0 : 
    3145            0 :         let self_clone = Arc::clone(self);
    3146            0 :         let background_ctx = ctx.detached_child(
    3147            0 :             TaskKind::InitialLogicalSizeCalculation,
    3148            0 :             DownloadBehavior::Download,
    3149            0 :         );
    3150            0 :         task_mgr::spawn(
    3151            0 :             task_mgr::BACKGROUND_RUNTIME.handle(),
    3152            0 :             task_mgr::TaskKind::InitialLogicalSizeCalculation,
    3153            0 :             self.tenant_shard_id,
    3154            0 :             Some(self.timeline_id),
    3155            0 :             "initial size calculation",
    3156              :             // NB: don't log errors here, task_mgr will do that.
    3157            0 :             async move {
    3158            0 :                 self_clone
    3159            0 :                     .initial_logical_size_calculation_task(
    3160            0 :                         initial_part_end,
    3161            0 :                         cancel_wait_for_background_loop_concurrency_limit_semaphore,
    3162            0 :                         background_ctx,
    3163            0 :                     )
    3164            0 :                     .await;
    3165            0 :                 Ok(())
    3166            0 :             }
    3167            0 :             .instrument(info_span!(parent: None, "initial_size_calculation", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id)),
    3168              :         );
    3169            0 :     }
    3170              : 
    3171              :     /// # Cancellation
    3172              :     ///
    3173              :     /// This method is sensitive to `Timeline::cancel`.
    3174              :     ///
    3175              :     /// It is _not_ sensitive to task_mgr::shutdown_token().
    3176              :     ///
    3177              :     /// # Cancel-Safety
    3178              :     ///
    3179              :     /// It does Timeline IO, hence this should be polled to completion because
    3180              :     /// we could be leaving in-flight IOs behind, which is safe, but annoying
    3181              :     /// to reason about.
    3182            0 :     async fn initial_logical_size_calculation_task(
    3183            0 :         self: Arc<Self>,
    3184            0 :         initial_part_end: Lsn,
    3185            0 :         skip_concurrency_limiter: CancellationToken,
    3186            0 :         background_ctx: RequestContext,
    3187            0 :     ) {
    3188            0 :         scopeguard::defer! {
    3189            0 :             // Irrespective of the outcome of this operation, we should unblock anyone waiting for it.
    3190            0 :             self.current_logical_size.initialized.add_permits(1);
    3191            0 :         }
    3192            0 : 
    3193            0 :         let try_once = |attempt: usize| {
    3194            0 :             let background_ctx = &background_ctx;
    3195            0 :             let self_ref = &self;
    3196            0 :             let skip_concurrency_limiter = &skip_concurrency_limiter;
    3197            0 :             async move {
    3198            0 :                 let wait_for_permit = super::tasks::acquire_concurrency_permit(
    3199            0 :                     BackgroundLoopKind::InitialLogicalSizeCalculation,
    3200            0 :                     background_ctx,
    3201            0 :                 );
    3202              : 
    3203              :                 use crate::metrics::initial_logical_size::StartCircumstances;
    3204            0 :                 let (_maybe_permit, circumstances) = tokio::select! {
    3205            0 :                     permit = wait_for_permit => {
    3206            0 :                         (Some(permit), StartCircumstances::AfterBackgroundTasksRateLimit)
    3207              :                     }
    3208            0 :                     _ = self_ref.cancel.cancelled() => {
    3209            0 :                         return Err(CalculateLogicalSizeError::Cancelled);
    3210              :                     }
    3211            0 :                     () = skip_concurrency_limiter.cancelled() => {
    3212              :                         // Some action that is part of a end user interaction requested logical size
    3213              :                         // => break out of the rate limit
    3214              :                         // TODO: ideally we'd not run on BackgroundRuntime but the requester's runtime;
    3215              :                         // but then again what happens if they cancel; also, we should just be using
    3216              :                         // one runtime across the entire process, so, let's leave this for now.
    3217            0 :                         (None, StartCircumstances::SkippedConcurrencyLimiter)
    3218              :                     }
    3219              :                 };
    3220              : 
    3221            0 :                 let metrics_guard = if attempt == 1 {
    3222            0 :                     crate::metrics::initial_logical_size::START_CALCULATION.first(circumstances)
    3223              :                 } else {
    3224            0 :                     crate::metrics::initial_logical_size::START_CALCULATION.retry(circumstances)
    3225              :                 };
    3226              : 
    3227            0 :                 let io_concurrency = IoConcurrency::spawn_from_conf(
    3228            0 :                     self_ref.conf,
    3229            0 :                     self_ref
    3230            0 :                         .gate
    3231            0 :                         .enter()
    3232            0 :                         .map_err(|_| CalculateLogicalSizeError::Cancelled)?,
    3233              :                 );
    3234              : 
    3235            0 :                 let calculated_size = self_ref
    3236            0 :                     .logical_size_calculation_task(
    3237            0 :                         initial_part_end,
    3238            0 :                         LogicalSizeCalculationCause::Initial,
    3239            0 :                         background_ctx,
    3240            0 :                     )
    3241            0 :                     .await?;
    3242              : 
    3243            0 :                 self_ref
    3244            0 :                     .trigger_aux_file_size_computation(
    3245            0 :                         initial_part_end,
    3246            0 :                         background_ctx,
    3247            0 :                         io_concurrency,
    3248            0 :                     )
    3249            0 :                     .await?;
    3250              : 
    3251              :                 // TODO: add aux file size to logical size
    3252              : 
    3253            0 :                 Ok((calculated_size, metrics_guard))
    3254            0 :             }
    3255            0 :         };
    3256              : 
    3257            0 :         let retrying = async {
    3258            0 :             let mut attempt = 0;
    3259              :             loop {
    3260            0 :                 attempt += 1;
    3261            0 : 
    3262            0 :                 match try_once(attempt).await {
    3263            0 :                     Ok(res) => return ControlFlow::Continue(res),
    3264            0 :                     Err(CalculateLogicalSizeError::Cancelled) => return ControlFlow::Break(()),
    3265              :                     Err(
    3266            0 :                         e @ (CalculateLogicalSizeError::Decode(_)
    3267            0 :                         | CalculateLogicalSizeError::PageRead(_)),
    3268            0 :                     ) => {
    3269            0 :                         warn!(attempt, "initial size calculation failed: {e:?}");
    3270              :                         // exponential back-off doesn't make sense at these long intervals;
    3271              :                         // use fixed retry interval with generous jitter instead
    3272            0 :                         let sleep_duration = Duration::from_secs(
    3273            0 :                             u64::try_from(
    3274            0 :                                 // 1hour base
    3275            0 :                                 (60_i64 * 60_i64)
    3276            0 :                                     // 10min jitter
    3277            0 :                                     + rand::thread_rng().gen_range(-10 * 60..10 * 60),
    3278            0 :                             )
    3279            0 :                             .expect("10min < 1hour"),
    3280            0 :                         );
    3281            0 :                         tokio::select! {
    3282            0 :                             _ = tokio::time::sleep(sleep_duration) => {}
    3283            0 :                             _ = self.cancel.cancelled() => return ControlFlow::Break(()),
    3284              :                         }
    3285              :                     }
    3286              :                 }
    3287              :             }
    3288            0 :         };
    3289              : 
    3290            0 :         let (calculated_size, metrics_guard) = match retrying.await {
    3291            0 :             ControlFlow::Continue(calculated_size) => calculated_size,
    3292            0 :             ControlFlow::Break(()) => return,
    3293              :         };
    3294              : 
    3295              :         // we cannot query current_logical_size.current_size() to know the current
    3296              :         // *negative* value, only truncated to u64.
    3297            0 :         let added = self
    3298            0 :             .current_logical_size
    3299            0 :             .size_added_after_initial
    3300            0 :             .load(AtomicOrdering::Relaxed);
    3301            0 : 
    3302            0 :         let sum = calculated_size.saturating_add_signed(added);
    3303            0 : 
    3304            0 :         // set the gauge value before it can be set in `update_current_logical_size`.
    3305            0 :         self.metrics.current_logical_size_gauge.set(sum);
    3306            0 : 
    3307            0 :         self.current_logical_size
    3308            0 :             .initial_logical_size
    3309            0 :             .set((calculated_size, metrics_guard.calculation_result_saved()))
    3310            0 :             .ok()
    3311            0 :             .expect("only this task sets it");
    3312            0 :     }
    3313              : 
    3314            0 :     pub(crate) fn spawn_ondemand_logical_size_calculation(
    3315            0 :         self: &Arc<Self>,
    3316            0 :         lsn: Lsn,
    3317            0 :         cause: LogicalSizeCalculationCause,
    3318            0 :         ctx: RequestContext,
    3319            0 :     ) -> oneshot::Receiver<Result<u64, CalculateLogicalSizeError>> {
    3320            0 :         let (sender, receiver) = oneshot::channel();
    3321            0 :         let self_clone = Arc::clone(self);
    3322            0 :         // XXX if our caller loses interest, i.e., ctx is cancelled,
    3323            0 :         // we should stop the size calculation work and return an error.
    3324            0 :         // That would require restructuring this function's API to
    3325            0 :         // return the result directly, instead of a Receiver for the result.
    3326            0 :         let ctx = ctx.detached_child(
    3327            0 :             TaskKind::OndemandLogicalSizeCalculation,
    3328            0 :             DownloadBehavior::Download,
    3329            0 :         );
    3330            0 :         task_mgr::spawn(
    3331            0 :             task_mgr::BACKGROUND_RUNTIME.handle(),
    3332            0 :             task_mgr::TaskKind::OndemandLogicalSizeCalculation,
    3333            0 :             self.tenant_shard_id,
    3334            0 :             Some(self.timeline_id),
    3335            0 :             "ondemand logical size calculation",
    3336            0 :             async move {
    3337            0 :                 let res = self_clone
    3338            0 :                     .logical_size_calculation_task(lsn, cause, &ctx)
    3339            0 :                     .await;
    3340            0 :                 let _ = sender.send(res).ok();
    3341            0 :                 Ok(()) // Receiver is responsible for handling errors
    3342            0 :             }
    3343            0 :             .in_current_span(),
    3344            0 :         );
    3345            0 :         receiver
    3346            0 :     }
    3347              : 
    3348              :     #[instrument(skip_all)]
    3349              :     async fn logical_size_calculation_task(
    3350              :         self: &Arc<Self>,
    3351              :         lsn: Lsn,
    3352              :         cause: LogicalSizeCalculationCause,
    3353              :         ctx: &RequestContext,
    3354              :     ) -> Result<u64, CalculateLogicalSizeError> {
    3355              :         crate::span::debug_assert_current_span_has_tenant_and_timeline_id();
    3356              :         // We should never be calculating logical sizes on shard !=0, because these shards do not have
    3357              :         // accurate relation sizes, and they do not emit consumption metrics.
    3358              :         debug_assert!(self.tenant_shard_id.is_shard_zero());
    3359              : 
    3360              :         let guard = self
    3361              :             .gate
    3362              :             .enter()
    3363            0 :             .map_err(|_| CalculateLogicalSizeError::Cancelled)?;
    3364              : 
    3365              :         self.calculate_logical_size(lsn, cause, &guard, ctx).await
    3366              :     }
    3367              : 
    3368              :     /// Calculate the logical size of the database at the latest LSN.
    3369              :     ///
    3370              :     /// NOTE: counted incrementally, includes ancestors. This can be a slow operation,
    3371              :     /// especially if we need to download remote layers.
    3372            0 :     async fn calculate_logical_size(
    3373            0 :         &self,
    3374            0 :         up_to_lsn: Lsn,
    3375            0 :         cause: LogicalSizeCalculationCause,
    3376            0 :         _guard: &GateGuard,
    3377            0 :         ctx: &RequestContext,
    3378            0 :     ) -> Result<u64, CalculateLogicalSizeError> {
    3379            0 :         info!(
    3380            0 :             "Calculating logical size for timeline {} at {}",
    3381              :             self.timeline_id, up_to_lsn
    3382              :         );
    3383              : 
    3384            0 :         if let Err(()) = pausable_failpoint!("timeline-calculate-logical-size-pause", &self.cancel)
    3385              :         {
    3386            0 :             return Err(CalculateLogicalSizeError::Cancelled);
    3387            0 :         }
    3388              : 
    3389              :         // See if we've already done the work for initial size calculation.
    3390              :         // This is a short-cut for timelines that are mostly unused.
    3391            0 :         if let Some(size) = self.current_logical_size.initialized_size(up_to_lsn) {
    3392            0 :             return Ok(size);
    3393            0 :         }
    3394            0 :         let storage_time_metrics = match cause {
    3395              :             LogicalSizeCalculationCause::Initial
    3396              :             | LogicalSizeCalculationCause::ConsumptionMetricsSyntheticSize
    3397            0 :             | LogicalSizeCalculationCause::TenantSizeHandler => &self.metrics.logical_size_histo,
    3398              :             LogicalSizeCalculationCause::EvictionTaskImitation => {
    3399            0 :                 &self.metrics.imitate_logical_size_histo
    3400              :             }
    3401              :         };
    3402            0 :         let timer = storage_time_metrics.start_timer();
    3403            0 :         let logical_size = self
    3404            0 :             .get_current_logical_size_non_incremental(up_to_lsn, ctx)
    3405            0 :             .await?;
    3406            0 :         debug!("calculated logical size: {logical_size}");
    3407            0 :         timer.stop_and_record();
    3408            0 :         Ok(logical_size)
    3409            0 :     }
    3410              : 
    3411              :     /// Update current logical size, adding `delta' to the old value.
    3412       541140 :     fn update_current_logical_size(&self, delta: i64) {
    3413       541140 :         let logical_size = &self.current_logical_size;
    3414       541140 :         logical_size.increment_size(delta);
    3415       541140 : 
    3416       541140 :         // Also set the value in the prometheus gauge. Note that
    3417       541140 :         // there is a race condition here: if this is is called by two
    3418       541140 :         // threads concurrently, the prometheus gauge might be set to
    3419       541140 :         // one value while current_logical_size is set to the
    3420       541140 :         // other.
    3421       541140 :         match logical_size.current_size() {
    3422       541140 :             CurrentLogicalSize::Exact(ref new_current_size) => self
    3423       541140 :                 .metrics
    3424       541140 :                 .current_logical_size_gauge
    3425       541140 :                 .set(new_current_size.into()),
    3426            0 :             CurrentLogicalSize::Approximate(_) => {
    3427            0 :                 // don't update the gauge yet, this allows us not to update the gauge back and
    3428            0 :                 // forth between the initial size calculation task.
    3429            0 :             }
    3430              :         }
    3431       541140 :     }
    3432              : 
    3433         5937 :     pub(crate) fn update_directory_entries_count(&self, kind: DirectoryKind, count: u64) {
    3434         5937 :         self.directory_metrics[kind.offset()].store(count, AtomicOrdering::Relaxed);
    3435         5937 :         let aux_metric =
    3436         5937 :             self.directory_metrics[DirectoryKind::AuxFiles.offset()].load(AtomicOrdering::Relaxed);
    3437         5937 : 
    3438         5937 :         let sum_of_entries = self
    3439         5937 :             .directory_metrics
    3440         5937 :             .iter()
    3441        41559 :             .map(|v| v.load(AtomicOrdering::Relaxed))
    3442         5937 :             .sum();
    3443              :         // Set a high general threshold and a lower threshold for the auxiliary files,
    3444              :         // as we can have large numbers of relations in the db directory.
    3445              :         const SUM_THRESHOLD: u64 = 5000;
    3446              :         const AUX_THRESHOLD: u64 = 1000;
    3447         5937 :         if sum_of_entries >= SUM_THRESHOLD || aux_metric >= AUX_THRESHOLD {
    3448            0 :             self.metrics
    3449            0 :                 .directory_entries_count_gauge
    3450            0 :                 .set(sum_of_entries);
    3451         5937 :         } else if let Some(metric) = Lazy::get(&self.metrics.directory_entries_count_gauge) {
    3452            0 :             metric.set(sum_of_entries);
    3453         5937 :         }
    3454         5937 :     }
    3455              : 
    3456            0 :     async fn find_layer(
    3457            0 :         &self,
    3458            0 :         layer_name: &LayerName,
    3459            0 :     ) -> Result<Option<Layer>, layer_manager::Shutdown> {
    3460            0 :         let guard = self.layers.read().await;
    3461            0 :         let layer = guard
    3462            0 :             .layer_map()?
    3463            0 :             .iter_historic_layers()
    3464            0 :             .find(|l| &l.layer_name() == layer_name)
    3465            0 :             .map(|found| guard.get_from_desc(&found));
    3466            0 :         Ok(layer)
    3467            0 :     }
    3468              : 
    3469              :     /// The timeline heatmap is a hint to secondary locations from the primary location,
    3470              :     /// indicating which layers are currently on-disk on the primary.
    3471              :     ///
    3472              :     /// None is returned if the Timeline is in a state where uploading a heatmap
    3473              :     /// doesn't make sense, such as shutting down or initializing.  The caller
    3474              :     /// should treat this as a cue to simply skip doing any heatmap uploading
    3475              :     /// for this timeline.
    3476           35 :     pub(crate) async fn generate_heatmap(&self) -> Option<HeatMapTimeline> {
    3477           35 :         if !self.is_active() {
    3478            0 :             return None;
    3479           35 :         }
    3480              : 
    3481           35 :         let guard = self.layers.read().await;
    3482              : 
    3483              :         // Firstly, if there's any heatmap left over from when this location
    3484              :         // was a secondary, take that into account. Keep layers that are:
    3485              :         // * present in the layer map
    3486              :         // * visible
    3487              :         // * non-resident
    3488              :         // * not evicted since we read the heatmap
    3489              :         //
    3490              :         // Without this, a new cold, attached location would clobber the previous
    3491              :         // heatamp.
    3492           35 :         let previous_heatmap = self.previous_heatmap.load();
    3493           35 :         let visible_non_resident = match previous_heatmap.as_deref() {
    3494           26 :             Some(PreviousHeatmap::Active { heatmap, read_at }) => {
    3495          100 :                 Some(heatmap.layers.iter().filter_map(|hl| {
    3496          100 :                     let desc: PersistentLayerDesc = hl.name.clone().into();
    3497          100 :                     let layer = guard.try_get_from_key(&desc.key())?;
    3498              : 
    3499          100 :                     if layer.visibility() == LayerVisibilityHint::Covered {
    3500            0 :                         return None;
    3501          100 :                     }
    3502          100 : 
    3503          100 :                     if layer.is_likely_resident() {
    3504           41 :                         return None;
    3505           59 :                     }
    3506           59 : 
    3507           59 :                     if layer.last_evicted_at().happened_after(*read_at) {
    3508           13 :                         return None;
    3509           46 :                     }
    3510           46 : 
    3511           46 :                     Some((desc, hl.metadata.clone(), hl.access_time))
    3512          100 :                 }))
    3513              :             }
    3514            0 :             Some(PreviousHeatmap::Obsolete) => None,
    3515            9 :             None => None,
    3516              :         };
    3517              : 
    3518              :         // Secondly, all currently visible, resident layers are included.
    3519           78 :         let resident = guard.likely_resident_layers().filter_map(|layer| {
    3520           78 :             match layer.visibility() {
    3521              :                 LayerVisibilityHint::Visible => {
    3522              :                     // Layer is visible to one or more read LSNs: elegible for inclusion in layer map
    3523           73 :                     let last_activity_ts = layer.latest_activity();
    3524           73 :                     Some((
    3525           73 :                         layer.layer_desc().clone(),
    3526           73 :                         layer.metadata(),
    3527           73 :                         last_activity_ts,
    3528           73 :                     ))
    3529              :                 }
    3530              :                 LayerVisibilityHint::Covered => {
    3531              :                     // Layer is resident but unlikely to be read: not elegible for inclusion in heatmap.
    3532            5 :                     None
    3533              :                 }
    3534              :             }
    3535           78 :         });
    3536              : 
    3537           35 :         let mut layers = match visible_non_resident {
    3538           26 :             Some(non_resident) => {
    3539           26 :                 let mut non_resident = non_resident.peekable();
    3540           26 :                 if non_resident.peek().is_none() {
    3541            8 :                     self.previous_heatmap
    3542            8 :                         .store(Some(PreviousHeatmap::Obsolete.into()));
    3543           18 :                 }
    3544              : 
    3545           26 :                 non_resident.chain(resident).collect::<Vec<_>>()
    3546              :             }
    3547            9 :             None => resident.collect::<Vec<_>>(),
    3548              :         };
    3549              : 
    3550              :         // Sort layers in order of which to download first.  For a large set of layers to download, we
    3551              :         // want to prioritize those layers which are most likely to still be in the resident many minutes
    3552              :         // or hours later:
    3553              :         // - Download L0s last, because they churn the fastest: L0s on a fast-writing tenant might
    3554              :         //   only exist for a few minutes before being compacted into L1s.
    3555              :         // - For L1 & image layers, download most recent LSNs first: the older the LSN, the sooner
    3556              :         //   the layer is likely to be covered by an image layer during compaction.
    3557          262 :         layers.sort_by_key(|(desc, _meta, _atime)| {
    3558          262 :             std::cmp::Reverse((
    3559          262 :                 !LayerMap::is_l0(&desc.key_range, desc.is_delta),
    3560          262 :                 desc.lsn_range.end,
    3561          262 :             ))
    3562          262 :         });
    3563           35 : 
    3564           35 :         let layers = layers
    3565           35 :             .into_iter()
    3566          119 :             .map(|(desc, meta, atime)| HeatMapLayer::new(desc.layer_name(), meta, atime))
    3567           35 :             .collect();
    3568           35 : 
    3569           35 :         Some(HeatMapTimeline::new(self.timeline_id, layers))
    3570           35 :     }
    3571              : 
    3572              :     /// Returns true if the given lsn is or was an ancestor branchpoint.
    3573            0 :     pub(crate) fn is_ancestor_lsn(&self, lsn: Lsn) -> bool {
    3574            0 :         // upon timeline detach, we set the ancestor_lsn to Lsn::INVALID and the store the original
    3575            0 :         // branchpoint in the value in IndexPart::lineage
    3576            0 :         self.ancestor_lsn == lsn
    3577            0 :             || (self.ancestor_lsn == Lsn::INVALID
    3578            0 :                 && self.remote_client.is_previous_ancestor_lsn(lsn))
    3579            0 :     }
    3580              : }
    3581              : 
    3582              : impl Timeline {
    3583              :     #[allow(clippy::doc_lazy_continuation)]
    3584              :     /// Get the data needed to reconstruct all keys in the provided keyspace
    3585              :     ///
    3586              :     /// The algorithm is as follows:
    3587              :     /// 1.   While some keys are still not done and there's a timeline to visit:
    3588              :     /// 2.   Visit the timeline (see [`Timeline::get_vectored_reconstruct_data_timeline`]:
    3589              :     /// 2.1: Build the fringe for the current keyspace
    3590              :     /// 2.2  Visit the newest layer from the fringe to collect all values for the range it
    3591              :     ///      intersects
    3592              :     /// 2.3. Pop the timeline from the fringe
    3593              :     /// 2.4. If the fringe is empty, go back to 1
    3594      1255313 :     async fn get_vectored_reconstruct_data(
    3595      1255313 :         &self,
    3596      1255313 :         mut keyspace: KeySpace,
    3597      1255313 :         request_lsn: Lsn,
    3598      1255313 :         reconstruct_state: &mut ValuesReconstructState,
    3599      1255313 :         ctx: &RequestContext,
    3600      1255313 :     ) -> Result<(), GetVectoredError> {
    3601      1255313 :         let mut timeline_owned: Arc<Timeline>;
    3602      1255313 :         let mut timeline = self;
    3603      1255313 : 
    3604      1255313 :         let mut cont_lsn = Lsn(request_lsn.0 + 1);
    3605              : 
    3606      1255309 :         let missing_keyspace = loop {
    3607      1706985 :             if self.cancel.is_cancelled() {
    3608            0 :                 return Err(GetVectoredError::Cancelled);
    3609      1706985 :             }
    3610              : 
    3611              :             let TimelineVisitOutcome {
    3612      1706985 :                 completed_keyspace: completed,
    3613      1706985 :                 image_covered_keyspace,
    3614      1706985 :             } = Self::get_vectored_reconstruct_data_timeline(
    3615      1706985 :                 timeline,
    3616      1706985 :                 keyspace.clone(),
    3617      1706985 :                 cont_lsn,
    3618      1706985 :                 reconstruct_state,
    3619      1706985 :                 &self.cancel,
    3620      1706985 :                 ctx,
    3621      1706985 :             )
    3622      1706985 :             .await?;
    3623              : 
    3624      1706985 :             keyspace.remove_overlapping_with(&completed);
    3625      1706985 : 
    3626      1706985 :             // Do not descend into the ancestor timeline for aux files.
    3627      1706985 :             // We don't return a blanket [`GetVectoredError::MissingKey`] to avoid
    3628      1706985 :             // stalling compaction.
    3629      1706985 :             keyspace.remove_overlapping_with(&KeySpace {
    3630      1706985 :                 ranges: vec![NON_INHERITED_RANGE, Key::sparse_non_inherited_keyspace()],
    3631      1706985 :             });
    3632      1706985 : 
    3633      1706985 :             // Keyspace is fully retrieved
    3634      1706985 :             if keyspace.is_empty() {
    3635      1255253 :                 break None;
    3636       451732 :             }
    3637              : 
    3638       451732 :             let Some(ancestor_timeline) = timeline.ancestor_timeline.as_ref() else {
    3639              :                 // Not fully retrieved but no ancestor timeline.
    3640           56 :                 break Some(keyspace);
    3641              :             };
    3642              : 
    3643              :             // Now we see if there are keys covered by the image layer but does not exist in the
    3644              :             // image layer, which means that the key does not exist.
    3645              : 
    3646              :             // The block below will stop the vectored search if any of the keys encountered an image layer
    3647              :             // which did not contain a snapshot for said key. Since we have already removed all completed
    3648              :             // keys from `keyspace`, we expect there to be no overlap between it and the image covered key
    3649              :             // space. If that's not the case, we had at least one key encounter a gap in the image layer
    3650              :             // and stop the search as a result of that.
    3651       451676 :             let mut removed = keyspace.remove_overlapping_with(&image_covered_keyspace);
    3652       451676 :             // Do not fire missing key error for sparse keys.
    3653       451676 :             removed.remove_overlapping_with(&KeySpace {
    3654       451676 :                 ranges: vec![SPARSE_RANGE],
    3655       451676 :             });
    3656       451676 :             if !removed.is_empty() {
    3657            0 :                 break Some(removed);
    3658       451676 :             }
    3659       451676 :             // If we reached this point, `remove_overlapping_with` should not have made any change to the
    3660       451676 :             // keyspace.
    3661       451676 : 
    3662       451676 :             // Take the min to avoid reconstructing a page with data newer than request Lsn.
    3663       451676 :             cont_lsn = std::cmp::min(Lsn(request_lsn.0 + 1), Lsn(timeline.ancestor_lsn.0 + 1));
    3664       451676 :             timeline_owned = timeline
    3665       451676 :                 .get_ready_ancestor_timeline(ancestor_timeline, ctx)
    3666       451676 :                 .await?;
    3667       451672 :             timeline = &*timeline_owned;
    3668              :         };
    3669              : 
    3670              :         // Remove sparse keys from the keyspace so that it doesn't fire errors.
    3671      1255309 :         let missing_keyspace = if let Some(missing_keyspace) = missing_keyspace {
    3672           56 :             let mut missing_keyspace = missing_keyspace;
    3673           56 :             missing_keyspace.remove_overlapping_with(&KeySpace {
    3674           56 :                 ranges: vec![SPARSE_RANGE],
    3675           56 :             });
    3676           56 :             if missing_keyspace.is_empty() {
    3677           28 :                 None
    3678              :             } else {
    3679           28 :                 Some(missing_keyspace)
    3680              :             }
    3681              :         } else {
    3682      1255253 :             None
    3683              :         };
    3684              : 
    3685      1255309 :         if let Some(missing_keyspace) = missing_keyspace {
    3686           28 :             return Err(GetVectoredError::MissingKey(MissingKeyError {
    3687           28 :                 key: missing_keyspace.start().unwrap(), /* better if we can store the full keyspace */
    3688           28 :                 shard: self
    3689           28 :                     .shard_identity
    3690           28 :                     .get_shard_number(&missing_keyspace.start().unwrap()),
    3691           28 :                 cont_lsn,
    3692           28 :                 request_lsn,
    3693           28 :                 ancestor_lsn: Some(timeline.ancestor_lsn),
    3694           28 :                 backtrace: None,
    3695           28 :                 read_path: std::mem::take(&mut reconstruct_state.read_path),
    3696           28 :             }));
    3697      1255281 :         }
    3698      1255281 : 
    3699      1255281 :         Ok(())
    3700      1255313 :     }
    3701              : 
    3702              :     /// Collect the reconstruct data for a keyspace from the specified timeline.
    3703              :     ///
    3704              :     /// Maintain a fringe [`LayerFringe`] which tracks all the layers that intersect
    3705              :     /// the current keyspace. The current keyspace of the search at any given timeline
    3706              :     /// is the original keyspace minus all the keys that have been completed minus
    3707              :     /// any keys for which we couldn't find an intersecting layer. It's not tracked explicitly,
    3708              :     /// but if you merge all the keyspaces in the fringe, you get the "current keyspace".
    3709              :     ///
    3710              :     /// This is basically a depth-first search visitor implementation where a vertex
    3711              :     /// is the (layer, lsn range, key space) tuple. The fringe acts as the stack.
    3712              :     ///
    3713              :     /// At each iteration pop the top of the fringe (the layer with the highest Lsn)
    3714              :     /// and get all the required reconstruct data from the layer in one go.
    3715              :     ///
    3716              :     /// Returns the completed keyspace and the keyspaces with image coverage. The caller
    3717              :     /// decides how to deal with these two keyspaces.
    3718      1706985 :     async fn get_vectored_reconstruct_data_timeline(
    3719      1706985 :         timeline: &Timeline,
    3720      1706985 :         keyspace: KeySpace,
    3721      1706985 :         mut cont_lsn: Lsn,
    3722      1706985 :         reconstruct_state: &mut ValuesReconstructState,
    3723      1706985 :         cancel: &CancellationToken,
    3724      1706985 :         ctx: &RequestContext,
    3725      1706985 :     ) -> Result<TimelineVisitOutcome, GetVectoredError> {
    3726      1706985 :         let mut unmapped_keyspace = keyspace.clone();
    3727      1706985 :         let mut fringe = LayerFringe::new();
    3728      1706985 : 
    3729      1706985 :         let mut completed_keyspace = KeySpace::default();
    3730      1706985 :         let mut image_covered_keyspace = KeySpaceRandomAccum::new();
    3731      1706985 : 
    3732      1706985 :         // Prevent GC from progressing while visiting the current timeline.
    3733      1706985 :         // If we are GC-ing because a new image layer was added while traversing
    3734      1706985 :         // the timeline, then it will remove layers that are required for fulfilling
    3735      1706985 :         // the current get request (read-path cannot "look back" and notice the new
    3736      1706985 :         // image layer).
    3737      1706985 :         let _gc_cutoff_holder = timeline.get_applied_gc_cutoff_lsn();
    3738              : 
    3739              :         // See `compaction::compact_with_gc` for why we need this.
    3740      1706985 :         let _guard = timeline.gc_compaction_layer_update_lock.read().await;
    3741              : 
    3742              :         loop {
    3743      3474918 :             if cancel.is_cancelled() {
    3744            0 :                 return Err(GetVectoredError::Cancelled);
    3745      3474918 :             }
    3746      3474918 : 
    3747      3474918 :             let (keys_done_last_step, keys_with_image_coverage) =
    3748      3474918 :                 reconstruct_state.consume_done_keys();
    3749      3474918 :             unmapped_keyspace.remove_overlapping_with(&keys_done_last_step);
    3750      3474918 :             completed_keyspace.merge(&keys_done_last_step);
    3751      3474918 :             if let Some(keys_with_image_coverage) = keys_with_image_coverage {
    3752         4996 :                 unmapped_keyspace
    3753         4996 :                     .remove_overlapping_with(&KeySpace::single(keys_with_image_coverage.clone()));
    3754         4996 :                 image_covered_keyspace.add_range(keys_with_image_coverage);
    3755      3469922 :             }
    3756              : 
    3757              :             // Do not descent any further if the last layer we visited
    3758              :             // completed all keys in the keyspace it inspected. This is not
    3759              :             // required for correctness, but avoids visiting extra layers
    3760              :             // which turns out to be a perf bottleneck in some cases.
    3761      3474918 :             if !unmapped_keyspace.is_empty() {
    3762      2264198 :                 let guard = timeline.layers.read().await;
    3763      2264198 :                 let layers = guard.layer_map()?;
    3764              : 
    3765      2264198 :                 let in_memory_layer = layers.find_in_memory_layer(|l| {
    3766      1829134 :                     let start_lsn = l.get_lsn_range().start;
    3767      1829134 :                     cont_lsn > start_lsn
    3768      2264198 :                 });
    3769      2264198 : 
    3770      2264198 :                 match in_memory_layer {
    3771      1213320 :                     Some(l) => {
    3772      1213320 :                         let lsn_range = l.get_lsn_range().start..cont_lsn;
    3773      1213320 :                         fringe.update(
    3774      1213320 :                             ReadableLayer::InMemoryLayer(l),
    3775      1213320 :                             unmapped_keyspace.clone(),
    3776      1213320 :                             lsn_range,
    3777      1213320 :                         );
    3778      1213320 :                     }
    3779              :                     None => {
    3780      1050928 :                         for range in unmapped_keyspace.ranges.iter() {
    3781      1050928 :                             let results = layers.range_search(range.clone(), cont_lsn);
    3782      1050928 : 
    3783      1050928 :                             results
    3784      1050928 :                                 .found
    3785      1050928 :                                 .into_iter()
    3786      1050928 :                                 .map(|(SearchResult { layer, lsn_floor }, keyspace_accum)| {
    3787       554665 :                                     (
    3788       554665 :                                         ReadableLayer::PersistentLayer(guard.get_from_desc(&layer)),
    3789       554665 :                                         keyspace_accum.to_keyspace(),
    3790       554665 :                                         lsn_floor..cont_lsn,
    3791       554665 :                                     )
    3792      1050928 :                                 })
    3793      1050928 :                                 .for_each(|(layer, keyspace, lsn_range)| {
    3794       554665 :                                     fringe.update(layer, keyspace, lsn_range)
    3795      1050928 :                                 });
    3796      1050928 :                         }
    3797              :                     }
    3798              :                 }
    3799              : 
    3800              :                 // It's safe to drop the layer map lock after planning the next round of reads.
    3801              :                 // The fringe keeps readable handles for the layers which are safe to read even
    3802              :                 // if layers were compacted or flushed.
    3803              :                 //
    3804              :                 // The more interesting consideration is: "Why is the read algorithm still correct
    3805              :                 // if the layer map changes while it is operating?". Doing a vectored read on a
    3806              :                 // timeline boils down to pushing an imaginary lsn boundary downwards for each range
    3807              :                 // covered by the read. The layer map tells us how to move the lsn downwards for a
    3808              :                 // range at *a particular point in time*. It is fine for the answer to be different
    3809              :                 // at two different time points.
    3810      2264198 :                 drop(guard);
    3811      1210720 :             }
    3812              : 
    3813      3474918 :             if let Some((layer_to_read, keyspace_to_read, lsn_range)) = fringe.next_layer() {
    3814      1767933 :                 if let Some(ref mut read_path) = reconstruct_state.read_path {
    3815      1767933 :                     read_path.record_layer_visit(&layer_to_read, &keyspace_to_read, &lsn_range);
    3816      1767933 :                 }
    3817      1767933 :                 let next_cont_lsn = lsn_range.start;
    3818      1767933 :                 layer_to_read
    3819      1767933 :                     .get_values_reconstruct_data(
    3820      1767933 :                         keyspace_to_read.clone(),
    3821      1767933 :                         lsn_range,
    3822      1767933 :                         reconstruct_state,
    3823      1767933 :                         ctx,
    3824      1767933 :                     )
    3825      1767933 :                     .await?;
    3826              : 
    3827      1767933 :                 unmapped_keyspace = keyspace_to_read;
    3828      1767933 :                 cont_lsn = next_cont_lsn;
    3829      1767933 : 
    3830      1767933 :                 reconstruct_state.on_layer_visited(&layer_to_read);
    3831              :             } else {
    3832      1706985 :                 break;
    3833      1706985 :             }
    3834      1706985 :         }
    3835      1706985 : 
    3836      1706985 :         Ok(TimelineVisitOutcome {
    3837      1706985 :             completed_keyspace,
    3838      1706985 :             image_covered_keyspace: image_covered_keyspace.consume_keyspace(),
    3839      1706985 :         })
    3840      1706985 :     }
    3841              : 
    3842       451676 :     async fn get_ready_ancestor_timeline(
    3843       451676 :         &self,
    3844       451676 :         ancestor: &Arc<Timeline>,
    3845       451676 :         ctx: &RequestContext,
    3846       451676 :     ) -> Result<Arc<Timeline>, GetReadyAncestorError> {
    3847       451676 :         // It's possible that the ancestor timeline isn't active yet, or
    3848       451676 :         // is active but hasn't yet caught up to the branch point. Wait
    3849       451676 :         // for it.
    3850       451676 :         //
    3851       451676 :         // This cannot happen while the pageserver is running normally,
    3852       451676 :         // because you cannot create a branch from a point that isn't
    3853       451676 :         // present in the pageserver yet. However, we don't wait for the
    3854       451676 :         // branch point to be uploaded to cloud storage before creating
    3855       451676 :         // a branch. I.e., the branch LSN need not be remote consistent
    3856       451676 :         // for the branching operation to succeed.
    3857       451676 :         //
    3858       451676 :         // Hence, if we try to load a tenant in such a state where
    3859       451676 :         // 1. the existence of the branch was persisted (in IndexPart and/or locally)
    3860       451676 :         // 2. but the ancestor state is behind branch_lsn because it was not yet persisted
    3861       451676 :         // then we will need to wait for the ancestor timeline to
    3862       451676 :         // re-stream WAL up to branch_lsn before we access it.
    3863       451676 :         //
    3864       451676 :         // How can a tenant get in such a state?
    3865       451676 :         // - ungraceful pageserver process exit
    3866       451676 :         // - detach+attach => this is a bug, https://github.com/neondatabase/neon/issues/4219
    3867       451676 :         //
    3868       451676 :         // NB: this could be avoided by requiring
    3869       451676 :         //   branch_lsn >= remote_consistent_lsn
    3870       451676 :         // during branch creation.
    3871       451676 :         match ancestor.wait_to_become_active(ctx).await {
    3872       451672 :             Ok(()) => {}
    3873              :             Err(TimelineState::Stopping) => {
    3874              :                 // If an ancestor is stopping, it means the tenant is stopping: handle this the same as if this timeline was stopping.
    3875            0 :                 return Err(GetReadyAncestorError::Cancelled);
    3876              :             }
    3877            4 :             Err(state) => {
    3878            4 :                 return Err(GetReadyAncestorError::BadState {
    3879            4 :                     timeline_id: ancestor.timeline_id,
    3880            4 :                     state,
    3881            4 :                 });
    3882              :             }
    3883              :         }
    3884       451672 :         ancestor
    3885       451672 :             .wait_lsn(
    3886       451672 :                 self.ancestor_lsn,
    3887       451672 :                 WaitLsnWaiter::Timeline(self),
    3888       451672 :                 WaitLsnTimeout::Default,
    3889       451672 :                 ctx,
    3890       451672 :             )
    3891       451672 :             .await
    3892       451672 :             .map_err(|e| match e {
    3893            0 :                 e @ WaitLsnError::Timeout(_) => GetReadyAncestorError::AncestorLsnTimeout(e),
    3894            0 :                 WaitLsnError::Shutdown => GetReadyAncestorError::Cancelled,
    3895            0 :                 WaitLsnError::BadState(state) => GetReadyAncestorError::BadState {
    3896            0 :                     timeline_id: ancestor.timeline_id,
    3897            0 :                     state,
    3898            0 :                 },
    3899       451672 :             })?;
    3900              : 
    3901       451672 :         Ok(ancestor.clone())
    3902       451676 :     }
    3903              : 
    3904       594312 :     pub(crate) fn get_shard_identity(&self) -> &ShardIdentity {
    3905       594312 :         &self.shard_identity
    3906       594312 :     }
    3907              : 
    3908              :     #[inline(always)]
    3909            0 :     pub(crate) fn shard_timeline_id(&self) -> ShardTimelineId {
    3910            0 :         ShardTimelineId {
    3911            0 :             shard_index: ShardIndex {
    3912            0 :                 shard_number: self.shard_identity.number,
    3913            0 :                 shard_count: self.shard_identity.count,
    3914            0 :             },
    3915            0 :             timeline_id: self.timeline_id,
    3916            0 :         }
    3917            0 :     }
    3918              : 
    3919              :     /// Returns a non-frozen open in-memory layer for ingestion.
    3920              :     ///
    3921              :     /// Takes a witness of timeline writer state lock being held, because it makes no sense to call
    3922              :     /// this function without holding the mutex.
    3923         2597 :     async fn get_layer_for_write(
    3924         2597 :         &self,
    3925         2597 :         lsn: Lsn,
    3926         2597 :         _guard: &tokio::sync::MutexGuard<'_, Option<TimelineWriterState>>,
    3927         2597 :         ctx: &RequestContext,
    3928         2597 :     ) -> anyhow::Result<Arc<InMemoryLayer>> {
    3929         2597 :         let mut guard = self.layers.write().await;
    3930              : 
    3931         2597 :         let last_record_lsn = self.get_last_record_lsn();
    3932         2597 :         ensure!(
    3933         2597 :             lsn > last_record_lsn,
    3934            0 :             "cannot modify relation after advancing last_record_lsn (incoming_lsn={}, last_record_lsn={})",
    3935              :             lsn,
    3936              :             last_record_lsn,
    3937              :         );
    3938              : 
    3939         2597 :         let layer = guard
    3940         2597 :             .open_mut()?
    3941         2597 :             .get_layer_for_write(
    3942         2597 :                 lsn,
    3943         2597 :                 self.conf,
    3944         2597 :                 self.timeline_id,
    3945         2597 :                 self.tenant_shard_id,
    3946         2597 :                 &self.gate,
    3947         2597 :                 ctx,
    3948         2597 :             )
    3949         2597 :             .await?;
    3950         2597 :         Ok(layer)
    3951         2597 :     }
    3952              : 
    3953     10558189 :     pub(crate) fn finish_write(&self, new_lsn: Lsn) {
    3954     10558189 :         assert!(new_lsn.is_aligned());
    3955              : 
    3956     10558189 :         self.metrics.last_record_lsn_gauge.set(new_lsn.0 as i64);
    3957     10558189 :         self.last_record_lsn.advance(new_lsn);
    3958     10558189 :     }
    3959              : 
    3960              :     /// Freeze any existing open in-memory layer and unconditionally notify the flush loop.
    3961              :     ///
    3962              :     /// Unconditional flush loop notification is given because in sharded cases we will want to
    3963              :     /// leave an Lsn gap. Unsharded tenants do not have Lsn gaps.
    3964         2405 :     async fn freeze_inmem_layer_at(
    3965         2405 :         &self,
    3966         2405 :         at: Lsn,
    3967         2405 :         write_lock: &mut tokio::sync::MutexGuard<'_, Option<TimelineWriterState>>,
    3968         2405 :     ) -> Result<u64, FlushLayerError> {
    3969         2405 :         let frozen = {
    3970         2405 :             let mut guard = self.layers.write().await;
    3971         2405 :             guard
    3972         2405 :                 .open_mut()?
    3973         2405 :                 .try_freeze_in_memory_layer(at, &self.last_freeze_at, write_lock, &self.metrics)
    3974         2405 :                 .await
    3975              :         };
    3976              : 
    3977         2405 :         if frozen {
    3978         2349 :             let now = Instant::now();
    3979         2349 :             *(self.last_freeze_ts.write().unwrap()) = now;
    3980         2349 :         }
    3981              : 
    3982              :         // Increment the flush cycle counter and wake up the flush task.
    3983              :         // Remember the new value, so that when we listen for the flush
    3984              :         // to finish, we know when the flush that we initiated has
    3985              :         // finished, instead of some other flush that was started earlier.
    3986         2405 :         let mut my_flush_request = 0;
    3987         2405 : 
    3988         2405 :         let flush_loop_state = { *self.flush_loop_state.lock().unwrap() };
    3989         2405 :         if !matches!(flush_loop_state, FlushLoopState::Running { .. }) {
    3990            0 :             return Err(FlushLayerError::NotRunning(flush_loop_state));
    3991         2405 :         }
    3992         2405 : 
    3993         2405 :         self.layer_flush_start_tx.send_modify(|(counter, lsn)| {
    3994         2405 :             my_flush_request = *counter + 1;
    3995         2405 :             *counter = my_flush_request;
    3996         2405 :             *lsn = std::cmp::max(at, *lsn);
    3997         2405 :         });
    3998         2405 : 
    3999         2405 :         assert_ne!(my_flush_request, 0);
    4000              : 
    4001         2405 :         Ok(my_flush_request)
    4002         2405 :     }
    4003              : 
    4004              :     /// Layer flusher task's main loop.
    4005          885 :     async fn flush_loop(
    4006          885 :         self: &Arc<Self>,
    4007          885 :         mut layer_flush_start_rx: tokio::sync::watch::Receiver<(u64, Lsn)>,
    4008          885 :         ctx: &RequestContext,
    4009          885 :     ) {
    4010              :         // Subscribe to L0 delta layer updates, for compaction backpressure.
    4011          885 :         let mut watch_l0 = match self.layers.read().await.layer_map() {
    4012          885 :             Ok(lm) => lm.watch_level0_deltas(),
    4013            0 :             Err(Shutdown) => return,
    4014              :         };
    4015              : 
    4016          885 :         info!("started flush loop");
    4017              :         loop {
    4018         3215 :             tokio::select! {
    4019         3215 :                 _ = self.cancel.cancelled() => {
    4020           20 :                     info!("shutting down layer flush task due to Timeline::cancel");
    4021           20 :                     break;
    4022              :                 },
    4023         3215 :                 _ = layer_flush_start_rx.changed() => {}
    4024         2330 :             }
    4025         2330 :             trace!("waking up");
    4026         2330 :             let (flush_counter, frozen_to_lsn) = *layer_flush_start_rx.borrow();
    4027         2330 : 
    4028         2330 :             // The highest LSN to which we flushed in the loop over frozen layers
    4029         2330 :             let mut flushed_to_lsn = Lsn(0);
    4030              : 
    4031         2330 :             let result = loop {
    4032         4679 :                 if self.cancel.is_cancelled() {
    4033            0 :                     info!("dropping out of flush loop for timeline shutdown");
    4034              :                     // Note: we do not bother transmitting into [`layer_flush_done_tx`], because
    4035              :                     // anyone waiting on that will respect self.cancel as well: they will stop
    4036              :                     // waiting at the same time we as drop out of this loop.
    4037            0 :                     return;
    4038         4679 :                 }
    4039         4679 : 
    4040         4679 :                 // Break to notify potential waiters as soon as we've flushed the requested LSN. If
    4041         4679 :                 // more requests have arrived in the meanwhile, we'll resume flushing afterwards.
    4042         4679 :                 if flushed_to_lsn >= frozen_to_lsn {
    4043         2274 :                     break Ok(());
    4044         2405 :                 }
    4045              : 
    4046              :                 // Fetch the next layer to flush, if any.
    4047         2405 :                 let (layer, l0_count, frozen_count, frozen_size) = {
    4048         2405 :                     let layers = self.layers.read().await;
    4049         2405 :                     let Ok(lm) = layers.layer_map() else {
    4050            0 :                         info!("dropping out of flush loop for timeline shutdown");
    4051            0 :                         return;
    4052              :                     };
    4053         2405 :                     let l0_count = lm.level0_deltas().len();
    4054         2405 :                     let frozen_count = lm.frozen_layers.len();
    4055         2405 :                     let frozen_size: u64 = lm
    4056         2405 :                         .frozen_layers
    4057         2405 :                         .iter()
    4058         2425 :                         .map(|l| l.estimated_in_mem_size())
    4059         2405 :                         .sum();
    4060         2405 :                     let layer = lm.frozen_layers.front().cloned();
    4061         2405 :                     (layer, l0_count, frozen_count, frozen_size)
    4062         2405 :                     // drop 'layers' lock
    4063         2405 :                 };
    4064         2405 :                 let Some(layer) = layer else {
    4065           56 :                     break Ok(());
    4066              :                 };
    4067              : 
    4068              :                 // Stall flushes to backpressure if compaction can't keep up. This is propagated up
    4069              :                 // to WAL ingestion by having ephemeral layer rolls wait for flushes.
    4070              :                 //
    4071              :                 // NB: the compaction loop only checks `compaction_threshold` every 20 seconds, so
    4072              :                 // we can end up stalling before compaction even starts. Consider making it more
    4073              :                 // responsive (e.g. via `watch_level0_deltas`).
    4074         2349 :                 if let Some(stall_threshold) = self.get_l0_flush_stall_threshold() {
    4075            0 :                     if l0_count >= stall_threshold {
    4076            0 :                         warn!(
    4077            0 :                             "stalling layer flushes for compaction backpressure at {l0_count} \
    4078            0 :                             L0 layers ({frozen_count} frozen layers with {frozen_size} bytes)"
    4079              :                         );
    4080            0 :                         let stall_timer = self
    4081            0 :                             .metrics
    4082            0 :                             .flush_delay_histo
    4083            0 :                             .start_timer()
    4084            0 :                             .record_on_drop();
    4085            0 :                         tokio::select! {
    4086            0 :                             result = watch_l0.wait_for(|l0| *l0 < stall_threshold) => {
    4087            0 :                                 if let Ok(l0) = result.as_deref() {
    4088            0 :                                     let delay = stall_timer.elapsed().as_secs_f64();
    4089            0 :                                     info!("resuming layer flushes at {l0} L0 layers after {delay:.3}s");
    4090            0 :                                 }
    4091              :                             },
    4092            0 :                             _ = self.cancel.cancelled() => {},
    4093              :                         }
    4094            0 :                         continue; // check again
    4095            0 :                     }
    4096         2349 :                 }
    4097              : 
    4098              :                 // Flush the layer.
    4099         2349 :                 let flush_timer = self.metrics.flush_time_histo.start_timer();
    4100         2349 :                 match self.flush_frozen_layer(layer, ctx).await {
    4101         2349 :                     Ok(layer_lsn) => flushed_to_lsn = max(flushed_to_lsn, layer_lsn),
    4102              :                     Err(FlushLayerError::Cancelled) => {
    4103            0 :                         info!("dropping out of flush loop for timeline shutdown");
    4104            0 :                         return;
    4105              :                     }
    4106            0 :                     err @ Err(
    4107            0 :                         FlushLayerError::NotRunning(_)
    4108            0 :                         | FlushLayerError::Other(_)
    4109            0 :                         | FlushLayerError::CreateImageLayersError(_),
    4110            0 :                     ) => {
    4111            0 :                         error!("could not flush frozen layer: {err:?}");
    4112            0 :                         break err.map(|_| ());
    4113              :                     }
    4114              :                 }
    4115         2349 :                 let flush_duration = flush_timer.stop_and_record();
    4116         2349 : 
    4117         2349 :                 // Notify the tenant compaction loop if L0 compaction is needed.
    4118         2349 :                 let l0_count = *watch_l0.borrow();
    4119         2349 :                 if l0_count >= self.get_compaction_threshold() {
    4120          952 :                     self.l0_compaction_trigger.notify_one();
    4121         1397 :                 }
    4122              : 
    4123              :                 // Delay the next flush to backpressure if compaction can't keep up. We delay by the
    4124              :                 // flush duration such that the flush takes 2x as long. This is propagated up to WAL
    4125              :                 // ingestion by having ephemeral layer rolls wait for flushes.
    4126         2349 :                 if let Some(delay_threshold) = self.get_l0_flush_delay_threshold() {
    4127            0 :                     if l0_count >= delay_threshold {
    4128            0 :                         let delay = flush_duration.as_secs_f64();
    4129            0 :                         info!(
    4130            0 :                             "delaying layer flush by {delay:.3}s for compaction backpressure at \
    4131            0 :                             {l0_count} L0 layers ({frozen_count} frozen layers with {frozen_size} bytes)"
    4132              :                         );
    4133            0 :                         let _delay_timer = self
    4134            0 :                             .metrics
    4135            0 :                             .flush_delay_histo
    4136            0 :                             .start_timer()
    4137            0 :                             .record_on_drop();
    4138            0 :                         tokio::select! {
    4139            0 :                             _ = tokio::time::sleep(flush_duration) => {},
    4140            0 :                             _ = watch_l0.wait_for(|l0| *l0 < delay_threshold) => {},
    4141            0 :                             _ = self.cancel.cancelled() => {},
    4142              :                         }
    4143            0 :                     }
    4144         2349 :                 }
    4145              :             };
    4146              : 
    4147              :             // Unsharded tenants should never advance their LSN beyond the end of the
    4148              :             // highest layer they write: such gaps between layer data and the frozen LSN
    4149              :             // are only legal on sharded tenants.
    4150         2330 :             debug_assert!(
    4151         2330 :                 self.shard_identity.count.count() > 1
    4152         2330 :                     || flushed_to_lsn >= frozen_to_lsn
    4153           56 :                     || !flushed_to_lsn.is_valid()
    4154              :             );
    4155              : 
    4156         2330 :             if flushed_to_lsn < frozen_to_lsn && self.shard_identity.count.count() > 1 {
    4157              :                 // If our layer flushes didn't carry disk_consistent_lsn up to the `to_lsn` advertised
    4158              :                 // to us via layer_flush_start_rx, then advance it here.
    4159              :                 //
    4160              :                 // This path is only taken for tenants with multiple shards: single sharded tenants should
    4161              :                 // never encounter a gap in the wal.
    4162            0 :                 let old_disk_consistent_lsn = self.disk_consistent_lsn.load();
    4163            0 :                 tracing::debug!("Advancing disk_consistent_lsn across layer gap {old_disk_consistent_lsn}->{frozen_to_lsn}");
    4164            0 :                 if self.set_disk_consistent_lsn(frozen_to_lsn) {
    4165            0 :                     if let Err(e) = self.schedule_uploads(frozen_to_lsn, vec![]) {
    4166            0 :                         tracing::warn!("Failed to schedule metadata upload after updating disk_consistent_lsn: {e}");
    4167            0 :                     }
    4168            0 :                 }
    4169         2330 :             }
    4170              : 
    4171              :             // Notify any listeners that we're done
    4172         2330 :             let _ = self
    4173         2330 :                 .layer_flush_done_tx
    4174         2330 :                 .send_replace((flush_counter, result));
    4175              :         }
    4176           20 :     }
    4177              : 
    4178              :     /// Waits any flush request created by [`Self::freeze_inmem_layer_at`] to complete.
    4179         2245 :     async fn wait_flush_completion(&self, request: u64) -> Result<(), FlushLayerError> {
    4180         2245 :         let mut rx = self.layer_flush_done_tx.subscribe();
    4181              :         loop {
    4182              :             {
    4183         4567 :                 let (last_result_counter, last_result) = &*rx.borrow();
    4184         4567 :                 if *last_result_counter >= request {
    4185         2245 :                     if let Err(err) = last_result {
    4186              :                         // We already logged the original error in
    4187              :                         // flush_loop. We cannot propagate it to the caller
    4188              :                         // here, because it might not be Cloneable
    4189            0 :                         return Err(err.clone());
    4190              :                     } else {
    4191         2245 :                         return Ok(());
    4192              :                     }
    4193         2322 :                 }
    4194         2322 :             }
    4195         2322 :             trace!("waiting for flush to complete");
    4196         2322 :             tokio::select! {
    4197         2322 :                 rx_e = rx.changed() => {
    4198         2322 :                     rx_e.map_err(|_| FlushLayerError::NotRunning(*self.flush_loop_state.lock().unwrap()))?;
    4199              :                 },
    4200              :                 // Cancellation safety: we are not leaving an I/O in-flight for the flush, we're just ignoring
    4201              :                 // the notification from [`flush_loop`] that it completed.
    4202         2322 :                 _ = self.cancel.cancelled() => {
    4203            0 :                     tracing::info!("Cancelled layer flush due on timeline shutdown");
    4204            0 :                     return Ok(())
    4205              :                 }
    4206              :             };
    4207         2322 :             trace!("done")
    4208              :         }
    4209         2245 :     }
    4210              : 
    4211              :     /// Flush one frozen in-memory layer to disk, as a new delta layer.
    4212              :     ///
    4213              :     /// Return value is the last lsn (inclusive) of the layer that was frozen.
    4214              :     #[instrument(skip_all, fields(layer=%frozen_layer))]
    4215              :     async fn flush_frozen_layer(
    4216              :         self: &Arc<Self>,
    4217              :         frozen_layer: Arc<InMemoryLayer>,
    4218              :         ctx: &RequestContext,
    4219              :     ) -> Result<Lsn, FlushLayerError> {
    4220              :         debug_assert_current_span_has_tenant_and_timeline_id();
    4221              : 
    4222              :         // As a special case, when we have just imported an image into the repository,
    4223              :         // instead of writing out a L0 delta layer, we directly write out image layer
    4224              :         // files instead. This is possible as long as *all* the data imported into the
    4225              :         // repository have the same LSN.
    4226              :         let lsn_range = frozen_layer.get_lsn_range();
    4227              : 
    4228              :         // Whether to directly create image layers for this flush, or flush them as delta layers
    4229              :         let create_image_layer =
    4230              :             lsn_range.start == self.initdb_lsn && lsn_range.end == Lsn(self.initdb_lsn.0 + 1);
    4231              : 
    4232              :         #[cfg(test)]
    4233              :         {
    4234              :             match &mut *self.flush_loop_state.lock().unwrap() {
    4235              :                 FlushLoopState::NotStarted | FlushLoopState::Exited => {
    4236              :                     panic!("flush loop not running")
    4237              :                 }
    4238              :                 FlushLoopState::Running {
    4239              :                     expect_initdb_optimization,
    4240              :                     initdb_optimization_count,
    4241              :                     ..
    4242              :                 } => {
    4243              :                     if create_image_layer {
    4244              :                         *initdb_optimization_count += 1;
    4245              :                     } else {
    4246              :                         assert!(!*expect_initdb_optimization, "expected initdb optimization");
    4247              :                     }
    4248              :                 }
    4249              :             }
    4250              :         }
    4251              : 
    4252              :         let (layers_to_upload, delta_layer_to_add) = if create_image_layer {
    4253              :             // Note: The 'ctx' in use here has DownloadBehavior::Error. We should not
    4254              :             // require downloading anything during initial import.
    4255              :             let ((rel_partition, metadata_partition), _lsn) = self
    4256              :                 .repartition(
    4257              :                     self.initdb_lsn,
    4258              :                     self.get_compaction_target_size(),
    4259              :                     EnumSet::empty(),
    4260              :                     ctx,
    4261              :                 )
    4262              :                 .await
    4263            0 :                 .map_err(|e| FlushLayerError::from_anyhow(self, e.into()))?;
    4264              : 
    4265              :             if self.cancel.is_cancelled() {
    4266              :                 return Err(FlushLayerError::Cancelled);
    4267              :             }
    4268              : 
    4269              :             // Ensure that we have a single call to `create_image_layers` with a combined dense keyspace.
    4270              :             // So that the key ranges don't overlap.
    4271              :             let mut partitions = KeyPartitioning::default();
    4272              :             partitions.parts.extend(rel_partition.parts);
    4273              :             if !metadata_partition.parts.is_empty() {
    4274              :                 assert_eq!(
    4275              :                     metadata_partition.parts.len(),
    4276              :                     1,
    4277              :                     "currently sparse keyspace should only contain a single metadata keyspace"
    4278              :                 );
    4279              :                 // Safety: create_image_layers treat sparse keyspaces differently that it does not scan
    4280              :                 // every single key within the keyspace, and therefore, it's safe to force converting it
    4281              :                 // into a dense keyspace before calling this function.
    4282              :                 partitions
    4283              :                     .parts
    4284              :                     .extend(metadata_partition.into_dense().parts);
    4285              :             }
    4286              : 
    4287              :             let mut layers_to_upload = Vec::new();
    4288              :             let (generated_image_layers, is_complete) = self
    4289              :                 .create_image_layers(
    4290              :                     &partitions,
    4291              :                     self.initdb_lsn,
    4292              :                     ImageLayerCreationMode::Initial,
    4293              :                     ctx,
    4294              :                     LastImageLayerCreationStatus::Initial,
    4295              :                     false, // don't yield for L0, we're flushing L0
    4296              :                 )
    4297              :                 .await?;
    4298              :             debug_assert!(
    4299              :                 matches!(is_complete, LastImageLayerCreationStatus::Complete),
    4300              :                 "init image generation mode must fully cover the keyspace"
    4301              :             );
    4302              :             layers_to_upload.extend(generated_image_layers);
    4303              : 
    4304              :             (layers_to_upload, None)
    4305              :         } else {
    4306              :             // Normal case, write out a L0 delta layer file.
    4307              :             // `create_delta_layer` will not modify the layer map.
    4308              :             // We will remove frozen layer and add delta layer in one atomic operation later.
    4309              :             let Some(layer) = self
    4310              :                 .create_delta_layer(&frozen_layer, None, ctx)
    4311              :                 .await
    4312            0 :                 .map_err(|e| FlushLayerError::from_anyhow(self, e))?
    4313              :             else {
    4314              :                 panic!("delta layer cannot be empty if no filter is applied");
    4315              :             };
    4316              :             (
    4317              :                 // FIXME: even though we have a single image and single delta layer assumption
    4318              :                 // we push them to vec
    4319              :                 vec![layer.clone()],
    4320              :                 Some(layer),
    4321              :             )
    4322              :         };
    4323              : 
    4324              :         pausable_failpoint!("flush-layer-cancel-after-writing-layer-out-pausable");
    4325              : 
    4326              :         if self.cancel.is_cancelled() {
    4327              :             return Err(FlushLayerError::Cancelled);
    4328              :         }
    4329              : 
    4330              :         let disk_consistent_lsn = Lsn(lsn_range.end.0 - 1);
    4331              : 
    4332              :         // The new on-disk layers are now in the layer map. We can remove the
    4333              :         // in-memory layer from the map now. The flushed layer is stored in
    4334              :         // the mapping in `create_delta_layer`.
    4335              :         {
    4336              :             let mut guard = self.layers.write().await;
    4337              : 
    4338              :             guard.open_mut()?.finish_flush_l0_layer(
    4339              :                 delta_layer_to_add.as_ref(),
    4340              :                 &frozen_layer,
    4341              :                 &self.metrics,
    4342              :             );
    4343              : 
    4344              :             if self.set_disk_consistent_lsn(disk_consistent_lsn) {
    4345              :                 // Schedule remote uploads that will reflect our new disk_consistent_lsn
    4346              :                 self.schedule_uploads(disk_consistent_lsn, layers_to_upload)
    4347            0 :                     .map_err(|e| FlushLayerError::from_anyhow(self, e))?;
    4348              :             }
    4349              :             // release lock on 'layers'
    4350              :         };
    4351              : 
    4352              :         // Backpressure mechanism: wait with continuation of the flush loop until we have uploaded all layer files.
    4353              :         // This makes us refuse ingest until the new layers have been persisted to the remote
    4354              :         // TODO: remove this, and rely on l0_flush_{delay,stall}_threshold instead.
    4355              :         if self.get_l0_flush_wait_upload() {
    4356              :             let start = Instant::now();
    4357              :             self.remote_client
    4358              :                 .wait_completion()
    4359              :                 .await
    4360            0 :                 .map_err(|e| match e {
    4361              :                     WaitCompletionError::UploadQueueShutDownOrStopped
    4362              :                     | WaitCompletionError::NotInitialized(
    4363              :                         NotInitialized::ShuttingDown | NotInitialized::Stopped,
    4364            0 :                     ) => FlushLayerError::Cancelled,
    4365              :                     WaitCompletionError::NotInitialized(NotInitialized::Uninitialized) => {
    4366            0 :                         FlushLayerError::Other(anyhow!(e).into())
    4367              :                     }
    4368            0 :                 })?;
    4369              :             let duration = start.elapsed().as_secs_f64();
    4370              :             self.metrics.flush_wait_upload_time_gauge_add(duration);
    4371              :         }
    4372              : 
    4373              :         // FIXME: between create_delta_layer and the scheduling of the upload in `update_metadata_file`,
    4374              :         // a compaction can delete the file and then it won't be available for uploads any more.
    4375              :         // We still schedule the upload, resulting in an error, but ideally we'd somehow avoid this
    4376              :         // race situation.
    4377              :         // See https://github.com/neondatabase/neon/issues/4526
    4378              :         pausable_failpoint!("flush-frozen-pausable");
    4379              : 
    4380              :         // This failpoint is used by another test case `test_pageserver_recovery`.
    4381              :         fail_point!("flush-frozen-exit");
    4382              : 
    4383              :         Ok(Lsn(lsn_range.end.0 - 1))
    4384              :     }
    4385              : 
    4386              :     /// Return true if the value changed
    4387              :     ///
    4388              :     /// This function must only be used from the layer flush task.
    4389         2349 :     fn set_disk_consistent_lsn(&self, new_value: Lsn) -> bool {
    4390         2349 :         let old_value = self.disk_consistent_lsn.fetch_max(new_value);
    4391         2349 :         assert!(new_value >= old_value, "disk_consistent_lsn must be growing monotonously at runtime; current {old_value}, offered {new_value}");
    4392              : 
    4393         2349 :         self.metrics
    4394         2349 :             .disk_consistent_lsn_gauge
    4395         2349 :             .set(new_value.0 as i64);
    4396         2349 :         new_value != old_value
    4397         2349 :     }
    4398              : 
    4399              :     /// Update metadata file
    4400         2449 :     fn schedule_uploads(
    4401         2449 :         &self,
    4402         2449 :         disk_consistent_lsn: Lsn,
    4403         2449 :         layers_to_upload: impl IntoIterator<Item = ResidentLayer>,
    4404         2449 :     ) -> anyhow::Result<()> {
    4405         2449 :         // We can only save a valid 'prev_record_lsn' value on disk if we
    4406         2449 :         // flushed *all* in-memory changes to disk. We only track
    4407         2449 :         // 'prev_record_lsn' in memory for the latest processed record, so we
    4408         2449 :         // don't remember what the correct value that corresponds to some old
    4409         2449 :         // LSN is. But if we flush everything, then the value corresponding
    4410         2449 :         // current 'last_record_lsn' is correct and we can store it on disk.
    4411         2449 :         let RecordLsn {
    4412         2449 :             last: last_record_lsn,
    4413         2449 :             prev: prev_record_lsn,
    4414         2449 :         } = self.last_record_lsn.load();
    4415         2449 :         let ondisk_prev_record_lsn = if disk_consistent_lsn == last_record_lsn {
    4416         2194 :             Some(prev_record_lsn)
    4417              :         } else {
    4418          255 :             None
    4419              :         };
    4420              : 
    4421         2449 :         let update = crate::tenant::metadata::MetadataUpdate::new(
    4422         2449 :             disk_consistent_lsn,
    4423         2449 :             ondisk_prev_record_lsn,
    4424         2449 :             *self.applied_gc_cutoff_lsn.read(),
    4425         2449 :         );
    4426         2449 : 
    4427         2449 :         fail_point!("checkpoint-before-saving-metadata", |x| bail!(
    4428            0 :             "{}",
    4429            0 :             x.unwrap()
    4430         2449 :         ));
    4431              : 
    4432         4822 :         for layer in layers_to_upload {
    4433         2373 :             self.remote_client.schedule_layer_file_upload(layer)?;
    4434              :         }
    4435         2449 :         self.remote_client
    4436         2449 :             .schedule_index_upload_for_metadata_update(&update)?;
    4437              : 
    4438         2449 :         Ok(())
    4439         2449 :     }
    4440              : 
    4441            0 :     pub(crate) async fn preserve_initdb_archive(&self) -> anyhow::Result<()> {
    4442            0 :         self.remote_client
    4443            0 :             .preserve_initdb_archive(
    4444            0 :                 &self.tenant_shard_id.tenant_id,
    4445            0 :                 &self.timeline_id,
    4446            0 :                 &self.cancel,
    4447            0 :             )
    4448            0 :             .await
    4449            0 :     }
    4450              : 
    4451              :     // Write out the given frozen in-memory layer as a new L0 delta file. This L0 file will not be tracked
    4452              :     // in layer map immediately. The caller is responsible to put it into the layer map.
    4453         1936 :     async fn create_delta_layer(
    4454         1936 :         self: &Arc<Self>,
    4455         1936 :         frozen_layer: &Arc<InMemoryLayer>,
    4456         1936 :         key_range: Option<Range<Key>>,
    4457         1936 :         ctx: &RequestContext,
    4458         1936 :     ) -> anyhow::Result<Option<ResidentLayer>> {
    4459         1936 :         let self_clone = Arc::clone(self);
    4460         1936 :         let frozen_layer = Arc::clone(frozen_layer);
    4461         1936 :         let ctx = ctx.attached_child();
    4462         1936 :         let work = async move {
    4463         1936 :             let Some((desc, path)) = frozen_layer
    4464         1936 :                 .write_to_disk(&ctx, key_range, self_clone.l0_flush_global_state.inner())
    4465         1936 :                 .await?
    4466              :             else {
    4467            0 :                 return Ok(None);
    4468              :             };
    4469         1936 :             let new_delta = Layer::finish_creating(self_clone.conf, &self_clone, desc, &path)?;
    4470              : 
    4471              :             // The write_to_disk() above calls writer.finish() which already did the fsync of the inodes.
    4472              :             // We just need to fsync the directory in which these inodes are linked,
    4473              :             // which we know to be the timeline directory.
    4474              :             //
    4475              :             // We use fatal_err() below because the after write_to_disk returns with success,
    4476              :             // the in-memory state of the filesystem already has the layer file in its final place,
    4477              :             // and subsequent pageserver code could think it's durable while it really isn't.
    4478         1936 :             let timeline_dir = VirtualFile::open(
    4479         1936 :                 &self_clone
    4480         1936 :                     .conf
    4481         1936 :                     .timeline_path(&self_clone.tenant_shard_id, &self_clone.timeline_id),
    4482         1936 :                 &ctx,
    4483         1936 :             )
    4484         1936 :             .await
    4485         1936 :             .fatal_err("VirtualFile::open for timeline dir fsync");
    4486         1936 :             timeline_dir
    4487         1936 :                 .sync_all()
    4488         1936 :                 .await
    4489         1936 :                 .fatal_err("VirtualFile::sync_all timeline dir");
    4490         1936 :             anyhow::Ok(Some(new_delta))
    4491         1936 :         };
    4492              :         // Before tokio-epoll-uring, we ran write_to_disk & the sync_all inside spawn_blocking.
    4493              :         // Preserve that behavior to maintain the same behavior for `virtual_file_io_engine=std-fs`.
    4494              :         use crate::virtual_file::io_engine::IoEngine;
    4495         1936 :         match crate::virtual_file::io_engine::get() {
    4496            0 :             IoEngine::NotSet => panic!("io engine not set"),
    4497              :             IoEngine::StdFs => {
    4498          968 :                 let span = tracing::info_span!("blocking");
    4499          968 :                 tokio::task::spawn_blocking({
    4500          968 :                     move || Handle::current().block_on(work.instrument(span))
    4501          968 :                 })
    4502          968 :                 .await
    4503          968 :                 .context("spawn_blocking")
    4504          968 :                 .and_then(|x| x)
    4505              :             }
    4506              :             #[cfg(target_os = "linux")]
    4507          968 :             IoEngine::TokioEpollUring => work.await,
    4508              :         }
    4509         1936 :     }
    4510              : 
    4511         1181 :     async fn repartition(
    4512         1181 :         &self,
    4513         1181 :         lsn: Lsn,
    4514         1181 :         partition_size: u64,
    4515         1181 :         flags: EnumSet<CompactFlags>,
    4516         1181 :         ctx: &RequestContext,
    4517         1181 :     ) -> Result<((KeyPartitioning, SparseKeyPartitioning), Lsn), CompactionError> {
    4518         1181 :         let Ok(mut guard) = self.partitioning.try_write_guard() else {
    4519              :             // NB: there are two callers, one is the compaction task, of which there is only one per struct Tenant and hence Timeline.
    4520              :             // The other is the initdb optimization in flush_frozen_layer, used by `boostrap_timeline`, which runs before `.activate()`
    4521              :             // and hence before the compaction task starts.
    4522            0 :             return Err(CompactionError::Other(anyhow!(
    4523            0 :                 "repartition() called concurrently"
    4524            0 :             )));
    4525              :         };
    4526         1181 :         let ((dense_partition, sparse_partition), partition_lsn) = &*guard.read();
    4527         1181 :         if lsn < *partition_lsn {
    4528            0 :             return Err(CompactionError::Other(anyhow!(
    4529            0 :                 "repartition() called with LSN going backwards, this should not happen"
    4530            0 :             )));
    4531         1181 :         }
    4532         1181 : 
    4533         1181 :         let distance = lsn.0 - partition_lsn.0;
    4534         1181 :         if *partition_lsn != Lsn(0)
    4535          564 :             && distance <= self.repartition_threshold
    4536          564 :             && !flags.contains(CompactFlags::ForceRepartition)
    4537              :         {
    4538          536 :             debug!(
    4539              :                 distance,
    4540              :                 threshold = self.repartition_threshold,
    4541            0 :                 "no repartitioning needed"
    4542              :             );
    4543          536 :             return Ok((
    4544          536 :                 (dense_partition.clone(), sparse_partition.clone()),
    4545          536 :                 *partition_lsn,
    4546          536 :             ));
    4547          645 :         }
    4548              : 
    4549          645 :         let (dense_ks, sparse_ks) = self.collect_keyspace(lsn, ctx).await?;
    4550          645 :         let dense_partitioning = dense_ks.partition(&self.shard_identity, partition_size);
    4551          645 :         let sparse_partitioning = SparseKeyPartitioning {
    4552          645 :             parts: vec![sparse_ks],
    4553          645 :         }; // no partitioning for metadata keys for now
    4554          645 :         let result = ((dense_partitioning, sparse_partitioning), lsn);
    4555          645 :         guard.write(result.clone());
    4556          645 :         Ok(result)
    4557         1181 :     }
    4558              : 
    4559              :     // Is it time to create a new image layer for the given partition? True if we want to generate.
    4560            0 :     async fn time_for_new_image_layer(&self, partition: &KeySpace, lsn: Lsn) -> bool {
    4561            0 :         let threshold = self.get_image_creation_threshold();
    4562              : 
    4563            0 :         let guard = self.layers.read().await;
    4564            0 :         let Ok(layers) = guard.layer_map() else {
    4565            0 :             return false;
    4566              :         };
    4567              : 
    4568            0 :         let mut max_deltas = 0;
    4569            0 :         for part_range in &partition.ranges {
    4570            0 :             let image_coverage = layers.image_coverage(part_range, lsn);
    4571            0 :             for (img_range, last_img) in image_coverage {
    4572            0 :                 let img_lsn = if let Some(last_img) = last_img {
    4573            0 :                     last_img.get_lsn_range().end
    4574              :                 } else {
    4575            0 :                     Lsn(0)
    4576              :                 };
    4577              :                 // Let's consider an example:
    4578              :                 //
    4579              :                 // delta layer with LSN range 71-81
    4580              :                 // delta layer with LSN range 81-91
    4581              :                 // delta layer with LSN range 91-101
    4582              :                 // image layer at LSN 100
    4583              :                 //
    4584              :                 // If 'lsn' is still 100, i.e. no new WAL has been processed since the last image layer,
    4585              :                 // there's no need to create a new one. We check this case explicitly, to avoid passing
    4586              :                 // a bogus range to count_deltas below, with start > end. It's even possible that there
    4587              :                 // are some delta layers *later* than current 'lsn', if more WAL was processed and flushed
    4588              :                 // after we read last_record_lsn, which is passed here in the 'lsn' argument.
    4589            0 :                 if img_lsn < lsn {
    4590            0 :                     let num_deltas =
    4591            0 :                         layers.count_deltas(&img_range, &(img_lsn..lsn), Some(threshold));
    4592            0 : 
    4593            0 :                     max_deltas = max_deltas.max(num_deltas);
    4594            0 :                     if num_deltas >= threshold {
    4595            0 :                         debug!(
    4596            0 :                             "key range {}-{}, has {} deltas on this timeline in LSN range {}..{}",
    4597              :                             img_range.start, img_range.end, num_deltas, img_lsn, lsn
    4598              :                         );
    4599            0 :                         return true;
    4600            0 :                     }
    4601            0 :                 }
    4602              :             }
    4603              :         }
    4604              : 
    4605            0 :         debug!(
    4606              :             max_deltas,
    4607            0 :             "none of the partitioned ranges had >= {threshold} deltas"
    4608              :         );
    4609            0 :         false
    4610            0 :     }
    4611              : 
    4612              :     /// Create image layers for Postgres data. Assumes the caller passes a partition that is not too large,
    4613              :     /// so that at most one image layer will be produced from this function.
    4614              :     #[allow(clippy::too_many_arguments)]
    4615          457 :     async fn create_image_layer_for_rel_blocks(
    4616          457 :         self: &Arc<Self>,
    4617          457 :         partition: &KeySpace,
    4618          457 :         mut image_layer_writer: ImageLayerWriter,
    4619          457 :         lsn: Lsn,
    4620          457 :         ctx: &RequestContext,
    4621          457 :         img_range: Range<Key>,
    4622          457 :         io_concurrency: IoConcurrency,
    4623          457 :     ) -> Result<ImageLayerCreationOutcome, CreateImageLayersError> {
    4624          457 :         let mut wrote_keys = false;
    4625          457 : 
    4626          457 :         let mut key_request_accum = KeySpaceAccum::new();
    4627         3023 :         for range in &partition.ranges {
    4628         2566 :             let mut key = range.start;
    4629         5561 :             while key < range.end {
    4630              :                 // Decide whether to retain this key: usually we do, but sharded tenants may
    4631              :                 // need to drop keys that don't belong to them.  If we retain the key, add it
    4632              :                 // to `key_request_accum` for later issuing a vectored get
    4633         2995 :                 if self.shard_identity.is_key_disposable(&key) {
    4634            0 :                     debug!(
    4635            0 :                         "Dropping key {} during compaction (it belongs on shard {:?})",
    4636            0 :                         key,
    4637            0 :                         self.shard_identity.get_shard_number(&key)
    4638              :                     );
    4639         2995 :                 } else {
    4640         2995 :                     key_request_accum.add_key(key);
    4641         2995 :                 }
    4642              : 
    4643         2995 :                 let last_key_in_range = key.next() == range.end;
    4644         2995 :                 key = key.next();
    4645         2995 : 
    4646         2995 :                 // Maybe flush `key_rest_accum`
    4647         2995 :                 if key_request_accum.raw_size() >= Timeline::MAX_GET_VECTORED_KEYS
    4648         2995 :                     || (last_key_in_range && key_request_accum.raw_size() > 0)
    4649              :                 {
    4650         2566 :                     let results = self
    4651         2566 :                         .get_vectored(
    4652         2566 :                             key_request_accum.consume_keyspace(),
    4653         2566 :                             lsn,
    4654         2566 :                             io_concurrency.clone(),
    4655         2566 :                             ctx,
    4656         2566 :                         )
    4657         2566 :                         .await?;
    4658              : 
    4659         2566 :                     if self.cancel.is_cancelled() {
    4660            0 :                         return Err(CreateImageLayersError::Cancelled);
    4661         2566 :                     }
    4662              : 
    4663         5561 :                     for (img_key, img) in results {
    4664         2995 :                         let img = match img {
    4665         2995 :                             Ok(img) => img,
    4666            0 :                             Err(err) => {
    4667            0 :                                 // If we fail to reconstruct a VM or FSM page, we can zero the
    4668            0 :                                 // page without losing any actual user data. That seems better
    4669            0 :                                 // than failing repeatedly and getting stuck.
    4670            0 :                                 //
    4671            0 :                                 // We had a bug at one point, where we truncated the FSM and VM
    4672            0 :                                 // in the pageserver, but the Postgres didn't know about that
    4673            0 :                                 // and continued to generate incremental WAL records for pages
    4674            0 :                                 // that didn't exist in the pageserver. Trying to replay those
    4675            0 :                                 // WAL records failed to find the previous image of the page.
    4676            0 :                                 // This special case allows us to recover from that situation.
    4677            0 :                                 // See https://github.com/neondatabase/neon/issues/2601.
    4678            0 :                                 //
    4679            0 :                                 // Unfortunately we cannot do this for the main fork, or for
    4680            0 :                                 // any metadata keys, keys, as that would lead to actual data
    4681            0 :                                 // loss.
    4682            0 :                                 if img_key.is_rel_fsm_block_key() || img_key.is_rel_vm_block_key() {
    4683            0 :                                     warn!("could not reconstruct FSM or VM key {img_key}, filling with zeros: {err:?}");
    4684            0 :                                     ZERO_PAGE.clone()
    4685              :                                 } else {
    4686            0 :                                     return Err(CreateImageLayersError::from(err));
    4687              :                                 }
    4688              :                             }
    4689              :                         };
    4690              : 
    4691              :                         // Write all the keys we just read into our new image layer.
    4692         2995 :                         image_layer_writer.put_image(img_key, img, ctx).await?;
    4693         2995 :                         wrote_keys = true;
    4694              :                     }
    4695          429 :                 }
    4696              :             }
    4697              :         }
    4698              : 
    4699          457 :         if wrote_keys {
    4700              :             // Normal path: we have written some data into the new image layer for this
    4701              :             // partition, so flush it to disk.
    4702          457 :             info!(
    4703            0 :                 "produced image layer for rel {}",
    4704            0 :                 ImageLayerName {
    4705            0 :                     key_range: img_range.clone(),
    4706            0 :                     lsn
    4707            0 :                 },
    4708              :             );
    4709          457 :             Ok(ImageLayerCreationOutcome::Generated {
    4710          457 :                 unfinished_image_layer: image_layer_writer,
    4711          457 :             })
    4712              :         } else {
    4713            0 :             tracing::debug!("no data in range {}-{}", img_range.start, img_range.end);
    4714            0 :             Ok(ImageLayerCreationOutcome::Empty)
    4715              :         }
    4716          457 :     }
    4717              : 
    4718              :     /// Create an image layer for metadata keys. This function produces one image layer for all metadata
    4719              :     /// keys for now. Because metadata keys cannot exceed basebackup size limit, the image layer for it
    4720              :     /// would not be too large to fit in a single image layer.
    4721              :     ///
    4722              :     /// Creating image layers for metadata keys are different from relational keys. Firstly, instead of
    4723              :     /// iterating each key and get an image for each of them, we do a `vectored_get` scan over the sparse
    4724              :     /// keyspace to get all images in one run. Secondly, we use a different image layer generation metrics
    4725              :     /// for metadata keys than relational keys, which is the number of delta files visited during the scan.
    4726              :     #[allow(clippy::too_many_arguments)]
    4727          441 :     async fn create_image_layer_for_metadata_keys(
    4728          441 :         self: &Arc<Self>,
    4729          441 :         partition: &KeySpace,
    4730          441 :         mut image_layer_writer: ImageLayerWriter,
    4731          441 :         lsn: Lsn,
    4732          441 :         ctx: &RequestContext,
    4733          441 :         img_range: Range<Key>,
    4734          441 :         mode: ImageLayerCreationMode,
    4735          441 :         io_concurrency: IoConcurrency,
    4736          441 :     ) -> Result<ImageLayerCreationOutcome, CreateImageLayersError> {
    4737          441 :         // Metadata keys image layer creation.
    4738          441 :         let mut reconstruct_state = ValuesReconstructState::new(io_concurrency);
    4739          441 :         let begin = Instant::now();
    4740              :         // Directly use `get_vectored_impl` to skip the max_vectored_read_key limit check. Note that the keyspace should
    4741              :         // not contain too many keys, otherwise this takes a lot of memory.
    4742          441 :         let data = self
    4743          441 :             .get_vectored_impl(partition.clone(), lsn, &mut reconstruct_state, ctx)
    4744          441 :             .await?;
    4745          441 :         let (data, total_kb_retrieved, total_keys_retrieved) = {
    4746          441 :             let mut new_data = BTreeMap::new();
    4747          441 :             let mut total_kb_retrieved = 0;
    4748          441 :             let mut total_keys_retrieved = 0;
    4749        12465 :             for (k, v) in data {
    4750        12024 :                 let v = v?;
    4751        12024 :                 total_kb_retrieved += KEY_SIZE + v.len();
    4752        12024 :                 total_keys_retrieved += 1;
    4753        12024 :                 new_data.insert(k, v);
    4754              :             }
    4755          441 :             (new_data, total_kb_retrieved / 1024, total_keys_retrieved)
    4756          441 :         };
    4757          441 :         let delta_files_accessed = reconstruct_state.get_delta_layers_visited();
    4758          441 :         let elapsed = begin.elapsed();
    4759          441 : 
    4760          441 :         let trigger_generation = delta_files_accessed as usize >= MAX_AUX_FILE_V2_DELTAS;
    4761          441 :         info!(
    4762            0 :             "metadata key compaction: trigger_generation={trigger_generation}, delta_files_accessed={delta_files_accessed}, total_kb_retrieved={total_kb_retrieved}, total_keys_retrieved={total_keys_retrieved}, read_time={}s", elapsed.as_secs_f64()
    4763              :         );
    4764              : 
    4765          441 :         if !trigger_generation && mode == ImageLayerCreationMode::Try {
    4766            0 :             return Ok(ImageLayerCreationOutcome::Skip);
    4767          441 :         }
    4768          441 :         if self.cancel.is_cancelled() {
    4769            0 :             return Err(CreateImageLayersError::Cancelled);
    4770          441 :         }
    4771          441 :         let mut wrote_any_image = false;
    4772        12465 :         for (k, v) in data {
    4773        12024 :             if v.is_empty() {
    4774              :                 // the key has been deleted, it does not need an image
    4775              :                 // in metadata keyspace, an empty image == tombstone
    4776            8 :                 continue;
    4777        12016 :             }
    4778        12016 :             wrote_any_image = true;
    4779        12016 : 
    4780        12016 :             // No need to handle sharding b/c metadata keys are always on the 0-th shard.
    4781        12016 : 
    4782        12016 :             // TODO: split image layers to avoid too large layer files. Too large image files are not handled
    4783        12016 :             // on the normal data path either.
    4784        12016 :             image_layer_writer.put_image(k, v, ctx).await?;
    4785              :         }
    4786              : 
    4787          441 :         if wrote_any_image {
    4788              :             // Normal path: we have written some data into the new image layer for this
    4789              :             // partition, so flush it to disk.
    4790           20 :             info!(
    4791            0 :                 "created image layer for metadata {}",
    4792            0 :                 ImageLayerName {
    4793            0 :                     key_range: img_range.clone(),
    4794            0 :                     lsn
    4795            0 :                 }
    4796              :             );
    4797           20 :             Ok(ImageLayerCreationOutcome::Generated {
    4798           20 :                 unfinished_image_layer: image_layer_writer,
    4799           20 :             })
    4800              :         } else {
    4801          421 :             tracing::debug!("no data in range {}-{}", img_range.start, img_range.end);
    4802          421 :             Ok(ImageLayerCreationOutcome::Empty)
    4803              :         }
    4804          441 :     }
    4805              : 
    4806              :     /// Predicate function which indicates whether we should check if new image layers
    4807              :     /// are required. Since checking if new image layers are required is expensive in
    4808              :     /// terms of CPU, we only do it in the following cases:
    4809              :     /// 1. If the timeline has ingested sufficient WAL to justify the cost
    4810              :     /// 2. If enough time has passed since the last check:
    4811              :     ///     1. For large tenants, we wish to perform the check more often since they
    4812              :     ///        suffer from the lack of image layers
    4813              :     ///     2. For small tenants (that can mostly fit in RAM), we use a much longer interval
    4814         1181 :     fn should_check_if_image_layers_required(self: &Arc<Timeline>, lsn: Lsn) -> bool {
    4815              :         const LARGE_TENANT_THRESHOLD: u64 = 2 * 1024 * 1024 * 1024;
    4816              : 
    4817         1181 :         let last_checks_at = self.last_image_layer_creation_check_at.load();
    4818         1181 :         let distance = lsn
    4819         1181 :             .checked_sub(last_checks_at)
    4820         1181 :             .expect("Attempt to compact with LSN going backwards");
    4821         1181 :         let min_distance =
    4822         1181 :             self.get_image_layer_creation_check_threshold() as u64 * self.get_checkpoint_distance();
    4823         1181 : 
    4824         1181 :         let distance_based_decision = distance.0 >= min_distance;
    4825         1181 : 
    4826         1181 :         let mut time_based_decision = false;
    4827         1181 :         let mut last_check_instant = self.last_image_layer_creation_check_instant.lock().unwrap();
    4828         1181 :         if let CurrentLogicalSize::Exact(logical_size) = self.current_logical_size.current_size() {
    4829          977 :             let check_required_after = if Into::<u64>::into(&logical_size) >= LARGE_TENANT_THRESHOLD
    4830              :             {
    4831            0 :                 self.get_checkpoint_timeout()
    4832              :             } else {
    4833          977 :                 Duration::from_secs(3600 * 48)
    4834              :             };
    4835              : 
    4836          977 :             time_based_decision = match *last_check_instant {
    4837          564 :                 Some(last_check) => {
    4838          564 :                     let elapsed = last_check.elapsed();
    4839          564 :                     elapsed >= check_required_after
    4840              :                 }
    4841          413 :                 None => true,
    4842              :             };
    4843          204 :         }
    4844              : 
    4845              :         // Do the expensive delta layer counting only if this timeline has ingested sufficient
    4846              :         // WAL since the last check or a checkpoint timeout interval has elapsed since the last
    4847              :         // check.
    4848         1181 :         let decision = distance_based_decision || time_based_decision;
    4849              : 
    4850         1181 :         if decision {
    4851          413 :             self.last_image_layer_creation_check_at.store(lsn);
    4852          413 :             *last_check_instant = Some(Instant::now());
    4853          768 :         }
    4854              : 
    4855         1181 :         decision
    4856         1181 :     }
    4857              : 
    4858              :     /// Returns the image layers generated and an enum indicating whether the process is fully completed.
    4859              :     /// true = we have generate all image layers, false = we preempt the process for L0 compaction.
    4860              :     #[tracing::instrument(skip_all, fields(%lsn, %mode))]
    4861              :     async fn create_image_layers(
    4862              :         self: &Arc<Timeline>,
    4863              :         partitioning: &KeyPartitioning,
    4864              :         lsn: Lsn,
    4865              :         mode: ImageLayerCreationMode,
    4866              :         ctx: &RequestContext,
    4867              :         last_status: LastImageLayerCreationStatus,
    4868              :         yield_for_l0: bool,
    4869              :     ) -> Result<(Vec<ResidentLayer>, LastImageLayerCreationStatus), CreateImageLayersError> {
    4870              :         let timer = self.metrics.create_images_time_histo.start_timer();
    4871              : 
    4872              :         if partitioning.parts.is_empty() {
    4873              :             warn!("no partitions to create image layers for");
    4874              :             return Ok((vec![], LastImageLayerCreationStatus::Complete));
    4875              :         }
    4876              : 
    4877              :         // We need to avoid holes between generated image layers.
    4878              :         // Otherwise LayerMap::image_layer_exists will return false if key range of some layer is covered by more than one
    4879              :         // image layer with hole between them. In this case such layer can not be utilized by GC.
    4880              :         //
    4881              :         // How such hole between partitions can appear?
    4882              :         // if we have relation with relid=1 and size 100 and relation with relid=2 with size 200 then result of
    4883              :         // KeySpace::partition may contain partitions <100000000..100000099> and <200000000..200000199>.
    4884              :         // If there is delta layer <100000000..300000000> then it never be garbage collected because
    4885              :         // image layers  <100000000..100000099> and <200000000..200000199> are not completely covering it.
    4886              :         let mut start = Key::MIN;
    4887              : 
    4888              :         let check_for_image_layers =
    4889              :             if let LastImageLayerCreationStatus::Incomplete { last_key } = last_status {
    4890              :                 info!(
    4891              :                     "resuming image layer creation: last_status=incomplete, continue from {}",
    4892              :                     last_key
    4893              :                 );
    4894              :                 true
    4895              :             } else {
    4896              :                 self.should_check_if_image_layers_required(lsn)
    4897              :             };
    4898              : 
    4899              :         let mut batch_image_writer = BatchLayerWriter::new(self.conf).await?;
    4900              : 
    4901              :         let mut all_generated = true;
    4902              : 
    4903              :         let mut partition_processed = 0;
    4904              :         let mut total_partitions = partitioning.parts.len();
    4905              :         let mut last_partition_processed = None;
    4906              :         let mut partition_parts = partitioning.parts.clone();
    4907              : 
    4908              :         if let LastImageLayerCreationStatus::Incomplete { last_key } = last_status {
    4909              :             // We need to skip the partitions that have already been processed.
    4910              :             let mut found = false;
    4911              :             for (i, partition) in partition_parts.iter().enumerate() {
    4912              :                 if last_key <= partition.end().unwrap() {
    4913              :                     // ```plain
    4914              :                     // |------|--------|----------|------|
    4915              :                     //              ^last_key
    4916              :                     //                    ^start from this partition
    4917              :                     // ```
    4918              :                     // Why `i+1` instead of `i`?
    4919              :                     // It is possible that the user did some writes after the previous image layer creation attempt so that
    4920              :                     // a relation grows in size, and the last_key is now in the middle of the partition. In this case, we
    4921              :                     // still want to skip this partition, so that we can make progress and avoid generating image layers over
    4922              :                     // the same partition. Doing a mod to ensure we don't end up with an empty vec.
    4923              :                     if i + 1 >= total_partitions {
    4924              :                         // In general, this case should not happen -- if last_key is on the last partition, the previous
    4925              :                         // iteration of image layer creation should return a complete status.
    4926              :                         break; // with found=false
    4927              :                     }
    4928              :                     partition_parts = partition_parts.split_off(i + 1); // Remove the first i + 1 elements
    4929              :                     total_partitions = partition_parts.len();
    4930              :                     // Update the start key to the partition start.
    4931              :                     start = partition_parts[0].start().unwrap();
    4932              :                     found = true;
    4933              :                     break;
    4934              :                 }
    4935              :             }
    4936              :             if !found {
    4937              :                 // Last key is within the last partition, or larger than all partitions.
    4938              :                 return Ok((vec![], LastImageLayerCreationStatus::Complete));
    4939              :             }
    4940              :         }
    4941              : 
    4942              :         for partition in partition_parts.iter() {
    4943              :             if self.cancel.is_cancelled() {
    4944              :                 return Err(CreateImageLayersError::Cancelled);
    4945              :             }
    4946              :             partition_processed += 1;
    4947              :             let img_range = start..partition.ranges.last().unwrap().end;
    4948              :             let compact_metadata = partition.overlaps(&Key::metadata_key_range());
    4949              :             if compact_metadata {
    4950              :                 for range in &partition.ranges {
    4951              :                     assert!(
    4952              :                         range.start.field1 >= METADATA_KEY_BEGIN_PREFIX
    4953              :                             && range.end.field1 <= METADATA_KEY_END_PREFIX,
    4954              :                         "metadata keys must be partitioned separately"
    4955              :                     );
    4956              :                 }
    4957              :                 if mode == ImageLayerCreationMode::Try && !check_for_image_layers {
    4958              :                     // Skip compaction if there are not enough updates. Metadata compaction will do a scan and
    4959              :                     // might mess up with evictions.
    4960              :                     start = img_range.end;
    4961              :                     continue;
    4962              :                 }
    4963              :                 // For initial and force modes, we always generate image layers for metadata keys.
    4964              :             } else if let ImageLayerCreationMode::Try = mode {
    4965              :                 // check_for_image_layers = false -> skip
    4966              :                 // check_for_image_layers = true -> check time_for_new_image_layer -> skip/generate
    4967              :                 if !check_for_image_layers || !self.time_for_new_image_layer(partition, lsn).await {
    4968              :                     start = img_range.end;
    4969              :                     continue;
    4970              :                 }
    4971              :             }
    4972              :             if let ImageLayerCreationMode::Force = mode {
    4973              :                 // When forced to create image layers, we might try and create them where they already
    4974              :                 // exist.  This mode is only used in tests/debug.
    4975              :                 let layers = self.layers.read().await;
    4976              :                 if layers.contains_key(&PersistentLayerKey {
    4977              :                     key_range: img_range.clone(),
    4978              :                     lsn_range: PersistentLayerDesc::image_layer_lsn_range(lsn),
    4979              :                     is_delta: false,
    4980              :                 }) {
    4981              :                     // TODO: this can be processed with the BatchLayerWriter::finish_with_discard
    4982              :                     // in the future.
    4983              :                     tracing::info!(
    4984              :                         "Skipping image layer at {lsn} {}..{}, already exists",
    4985              :                         img_range.start,
    4986              :                         img_range.end
    4987              :                     );
    4988              :                     start = img_range.end;
    4989              :                     continue;
    4990              :                 }
    4991              :             }
    4992              : 
    4993              :             let image_layer_writer = ImageLayerWriter::new(
    4994              :                 self.conf,
    4995              :                 self.timeline_id,
    4996              :                 self.tenant_shard_id,
    4997              :                 &img_range,
    4998              :                 lsn,
    4999              :                 ctx,
    5000              :             )
    5001              :             .await?;
    5002              : 
    5003            0 :             fail_point!("image-layer-writer-fail-before-finish", |_| {
    5004            0 :                 Err(CreateImageLayersError::Other(anyhow::anyhow!(
    5005            0 :                     "failpoint image-layer-writer-fail-before-finish"
    5006            0 :                 )))
    5007            0 :             });
    5008              : 
    5009              :             let io_concurrency = IoConcurrency::spawn_from_conf(
    5010              :                 self.conf,
    5011              :                 self.gate
    5012              :                     .enter()
    5013            0 :                     .map_err(|_| CreateImageLayersError::Cancelled)?,
    5014              :             );
    5015              : 
    5016              :             let outcome = if !compact_metadata {
    5017              :                 self.create_image_layer_for_rel_blocks(
    5018              :                     partition,
    5019              :                     image_layer_writer,
    5020              :                     lsn,
    5021              :                     ctx,
    5022              :                     img_range.clone(),
    5023              :                     io_concurrency,
    5024              :                 )
    5025              :                 .await?
    5026              :             } else {
    5027              :                 self.create_image_layer_for_metadata_keys(
    5028              :                     partition,
    5029              :                     image_layer_writer,
    5030              :                     lsn,
    5031              :                     ctx,
    5032              :                     img_range.clone(),
    5033              :                     mode,
    5034              :                     io_concurrency,
    5035              :                 )
    5036              :                 .await?
    5037              :             };
    5038              :             match outcome {
    5039              :                 ImageLayerCreationOutcome::Empty => {
    5040              :                     // No data in this partition, so we don't need to create an image layer (for now).
    5041              :                     // The next image layer should cover this key range, so we don't advance the `start`
    5042              :                     // key.
    5043              :                 }
    5044              :                 ImageLayerCreationOutcome::Generated {
    5045              :                     unfinished_image_layer,
    5046              :                 } => {
    5047              :                     batch_image_writer.add_unfinished_image_writer(
    5048              :                         unfinished_image_layer,
    5049              :                         img_range.clone(),
    5050              :                         lsn,
    5051              :                     );
    5052              :                     // The next image layer should be generated right after this one.
    5053              :                     start = img_range.end;
    5054              :                 }
    5055              :                 ImageLayerCreationOutcome::Skip => {
    5056              :                     // We don't need to create an image layer for this partition.
    5057              :                     // The next image layer should NOT cover this range, otherwise
    5058              :                     // the keyspace becomes empty (reads don't go past image layers).
    5059              :                     start = img_range.end;
    5060              :                 }
    5061              :             }
    5062              : 
    5063              :             if let ImageLayerCreationMode::Try = mode {
    5064              :                 // We have at least made some progress
    5065              :                 if yield_for_l0 && batch_image_writer.pending_layer_num() >= 1 {
    5066              :                     // The `Try` mode is currently only used on the compaction path. We want to avoid
    5067              :                     // image layer generation taking too long time and blocking L0 compaction. So in this
    5068              :                     // mode, we also inspect the current number of L0 layers and skip image layer generation
    5069              :                     // if there are too many of them.
    5070              :                     let num_of_l0_layers = {
    5071              :                         let layers = self.layers.read().await;
    5072              :                         layers.layer_map()?.level0_deltas().len()
    5073              :                     };
    5074              :                     let image_preempt_threshold = self.get_image_creation_preempt_threshold()
    5075              :                         * self.get_compaction_threshold();
    5076              :                     if image_preempt_threshold != 0 && num_of_l0_layers >= image_preempt_threshold {
    5077              :                         tracing::info!(
    5078              :                         "preempt image layer generation at {lsn} when processing partition {}..{}: too many L0 layers {}",
    5079              :                         partition.start().unwrap(), partition.end().unwrap(), num_of_l0_layers
    5080              :                     );
    5081              :                         last_partition_processed = Some(partition.clone());
    5082              :                         all_generated = false;
    5083              :                         break;
    5084              :                     }
    5085              :                 }
    5086              :             }
    5087              :         }
    5088              : 
    5089              :         let image_layers = batch_image_writer.finish(self, ctx).await?;
    5090              : 
    5091              :         let mut guard = self.layers.write().await;
    5092              : 
    5093              :         // FIXME: we could add the images to be uploaded *before* returning from here, but right
    5094              :         // now they are being scheduled outside of write lock; current way is inconsistent with
    5095              :         // compaction lock order.
    5096              :         guard
    5097              :             .open_mut()?
    5098              :             .track_new_image_layers(&image_layers, &self.metrics);
    5099              :         drop_wlock(guard);
    5100              :         let duration = timer.stop_and_record();
    5101              : 
    5102              :         // Creating image layers may have caused some previously visible layers to be covered
    5103              :         if !image_layers.is_empty() {
    5104              :             self.update_layer_visibility().await?;
    5105              :         }
    5106              : 
    5107              :         let total_layer_size = image_layers
    5108              :             .iter()
    5109          477 :             .map(|l| l.metadata().file_size)
    5110              :             .sum::<u64>();
    5111              : 
    5112              :         info!(
    5113              :             "created {} image layers ({} bytes) in {}s, processed {} out of {} partitions",
    5114              :             image_layers.len(),
    5115              :             total_layer_size,
    5116              :             duration.as_secs_f64(),
    5117              :             partition_processed,
    5118              :             total_partitions
    5119              :         );
    5120              : 
    5121              :         Ok((
    5122              :             image_layers,
    5123              :             if all_generated {
    5124              :                 LastImageLayerCreationStatus::Complete
    5125              :             } else {
    5126              :                 LastImageLayerCreationStatus::Incomplete {
    5127              :                     last_key: if let Some(last_partition_processed) = last_partition_processed {
    5128              :                         last_partition_processed.end().unwrap_or(Key::MIN)
    5129              :                     } else {
    5130              :                         // This branch should be unreachable, but in case it happens, we can just return the start key.
    5131              :                         Key::MIN
    5132              :                     },
    5133              :                 }
    5134              :             },
    5135              :         ))
    5136              :     }
    5137              : 
    5138              :     /// Wait until the background initial logical size calculation is complete, or
    5139              :     /// this Timeline is shut down.  Calling this function will cause the initial
    5140              :     /// logical size calculation to skip waiting for the background jobs barrier.
    5141            0 :     pub(crate) async fn await_initial_logical_size(self: Arc<Self>) {
    5142            0 :         if !self.shard_identity.is_shard_zero() {
    5143              :             // We don't populate logical size on shard >0: skip waiting for it.
    5144            0 :             return;
    5145            0 :         }
    5146            0 : 
    5147            0 :         if self.remote_client.is_deleting() {
    5148              :             // The timeline was created in a deletion-resume state, we don't expect logical size to be populated
    5149            0 :             return;
    5150            0 :         }
    5151            0 : 
    5152            0 :         if self.current_logical_size.current_size().is_exact() {
    5153              :             // root timelines are initialized with exact count, but never start the background
    5154              :             // calculation
    5155            0 :             return;
    5156            0 :         }
    5157              : 
    5158            0 :         if let Some(await_bg_cancel) = self
    5159            0 :             .current_logical_size
    5160            0 :             .cancel_wait_for_background_loop_concurrency_limit_semaphore
    5161            0 :             .get()
    5162            0 :         {
    5163            0 :             await_bg_cancel.cancel();
    5164            0 :         } else {
    5165              :             // We should not wait if we were not able to explicitly instruct
    5166              :             // the logical size cancellation to skip the concurrency limit semaphore.
    5167              :             // TODO: this is an unexpected case.  We should restructure so that it
    5168              :             // can't happen.
    5169            0 :             tracing::warn!(
    5170            0 :                 "await_initial_logical_size: can't get semaphore cancel token, skipping"
    5171              :             );
    5172            0 :             debug_assert!(false);
    5173              :         }
    5174              : 
    5175            0 :         tokio::select!(
    5176            0 :             _ = self.current_logical_size.initialized.acquire() => {},
    5177            0 :             _ = self.cancel.cancelled() => {}
    5178              :         )
    5179            0 :     }
    5180              : 
    5181              :     /// Detach this timeline from its ancestor by copying all of ancestors layers as this
    5182              :     /// Timelines layers up to the ancestor_lsn.
    5183              :     ///
    5184              :     /// Requires a timeline that:
    5185              :     /// - has an ancestor to detach from
    5186              :     /// - the ancestor does not have an ancestor -- follows from the original RFC limitations, not
    5187              :     ///   a technical requirement
    5188              :     ///
    5189              :     /// After the operation has been started, it cannot be canceled. Upon restart it needs to be
    5190              :     /// polled again until completion.
    5191              :     ///
    5192              :     /// During the operation all timelines sharing the data with this timeline will be reparented
    5193              :     /// from our ancestor to be branches of this timeline.
    5194            0 :     pub(crate) async fn prepare_to_detach_from_ancestor(
    5195            0 :         self: &Arc<Timeline>,
    5196            0 :         tenant: &crate::tenant::Tenant,
    5197            0 :         options: detach_ancestor::Options,
    5198            0 :         ctx: &RequestContext,
    5199            0 :     ) -> Result<detach_ancestor::Progress, detach_ancestor::Error> {
    5200            0 :         detach_ancestor::prepare(self, tenant, options, ctx).await
    5201            0 :     }
    5202              : 
    5203              :     /// Second step of detach from ancestor; detaches the `self` from it's current ancestor and
    5204              :     /// reparents any reparentable children of previous ancestor.
    5205              :     ///
    5206              :     /// This method is to be called while holding the TenantManager's tenant slot, so during this
    5207              :     /// method we cannot be deleted nor can any timeline be deleted. After this method returns
    5208              :     /// successfully, tenant must be reloaded.
    5209              :     ///
    5210              :     /// Final step will be to [`Self::complete_detaching_timeline_ancestor`] after optionally
    5211              :     /// resetting the tenant.
    5212            0 :     pub(crate) async fn detach_from_ancestor_and_reparent(
    5213            0 :         self: &Arc<Timeline>,
    5214            0 :         tenant: &crate::tenant::Tenant,
    5215            0 :         prepared: detach_ancestor::PreparedTimelineDetach,
    5216            0 :         ctx: &RequestContext,
    5217            0 :     ) -> Result<detach_ancestor::DetachingAndReparenting, detach_ancestor::Error> {
    5218            0 :         detach_ancestor::detach_and_reparent(self, tenant, prepared, ctx).await
    5219            0 :     }
    5220              : 
    5221              :     /// Final step which unblocks the GC.
    5222              :     ///
    5223              :     /// The tenant must've been reset if ancestry was modified previously (in tenant manager).
    5224            0 :     pub(crate) async fn complete_detaching_timeline_ancestor(
    5225            0 :         self: &Arc<Timeline>,
    5226            0 :         tenant: &crate::tenant::Tenant,
    5227            0 :         attempt: detach_ancestor::Attempt,
    5228            0 :         ctx: &RequestContext,
    5229            0 :     ) -> Result<(), detach_ancestor::Error> {
    5230            0 :         detach_ancestor::complete(self, tenant, attempt, ctx).await
    5231            0 :     }
    5232              : }
    5233              : 
    5234              : impl Drop for Timeline {
    5235           20 :     fn drop(&mut self) {
    5236           20 :         if let Some(ancestor) = &self.ancestor_timeline {
    5237              :             // This lock should never be poisoned, but in case it is we do a .map() instead of
    5238              :             // an unwrap(), to avoid panicking in a destructor and thereby aborting the process.
    5239            8 :             if let Ok(mut gc_info) = ancestor.gc_info.write() {
    5240            8 :                 if !gc_info.remove_child_not_offloaded(self.timeline_id) {
    5241            0 :                     tracing::error!(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id,
    5242            0 :                         "Couldn't remove retain_lsn entry from offloaded timeline's parent: already removed");
    5243            8 :                 }
    5244            0 :             }
    5245           12 :         }
    5246           20 :         info!(
    5247            0 :             "Timeline {} for tenant {} is being dropped",
    5248              :             self.timeline_id, self.tenant_shard_id.tenant_id
    5249              :         );
    5250           20 :     }
    5251              : }
    5252              : 
    5253              : /// Top-level failure to compact.
    5254              : #[derive(Debug, thiserror::Error)]
    5255              : pub(crate) enum CompactionError {
    5256              :     #[error("The timeline or pageserver is shutting down")]
    5257              :     ShuttingDown,
    5258              :     /// Compaction tried to offload a timeline and failed
    5259              :     #[error("Failed to offload timeline: {0}")]
    5260              :     Offload(OffloadError),
    5261              :     /// Compaction cannot be done right now; page reconstruction and so on.
    5262              :     #[error(transparent)]
    5263              :     Other(anyhow::Error),
    5264              : }
    5265              : 
    5266              : impl From<OffloadError> for CompactionError {
    5267            0 :     fn from(e: OffloadError) -> Self {
    5268            0 :         match e {
    5269            0 :             OffloadError::Cancelled => Self::ShuttingDown,
    5270            0 :             _ => Self::Offload(e),
    5271              :         }
    5272            0 :     }
    5273              : }
    5274              : 
    5275              : impl CompactionError {
    5276            0 :     pub fn is_cancelled(&self) -> bool {
    5277            0 :         matches!(self, CompactionError::ShuttingDown)
    5278            0 :     }
    5279              : }
    5280              : 
    5281              : impl From<CollectKeySpaceError> for CompactionError {
    5282            0 :     fn from(err: CollectKeySpaceError) -> Self {
    5283            0 :         match err {
    5284              :             CollectKeySpaceError::Cancelled
    5285              :             | CollectKeySpaceError::PageRead(PageReconstructError::Cancelled) => {
    5286            0 :                 CompactionError::ShuttingDown
    5287              :             }
    5288            0 :             e => CompactionError::Other(e.into()),
    5289              :         }
    5290            0 :     }
    5291              : }
    5292              : 
    5293              : impl From<super::upload_queue::NotInitialized> for CompactionError {
    5294            0 :     fn from(value: super::upload_queue::NotInitialized) -> Self {
    5295            0 :         match value {
    5296              :             super::upload_queue::NotInitialized::Uninitialized => {
    5297            0 :                 CompactionError::Other(anyhow::anyhow!(value))
    5298              :             }
    5299              :             super::upload_queue::NotInitialized::ShuttingDown
    5300            0 :             | super::upload_queue::NotInitialized::Stopped => CompactionError::ShuttingDown,
    5301              :         }
    5302            0 :     }
    5303              : }
    5304              : 
    5305              : impl From<super::storage_layer::layer::DownloadError> for CompactionError {
    5306            0 :     fn from(e: super::storage_layer::layer::DownloadError) -> Self {
    5307            0 :         match e {
    5308              :             super::storage_layer::layer::DownloadError::TimelineShutdown
    5309              :             | super::storage_layer::layer::DownloadError::DownloadCancelled => {
    5310            0 :                 CompactionError::ShuttingDown
    5311              :             }
    5312              :             super::storage_layer::layer::DownloadError::ContextAndConfigReallyDeniesDownloads
    5313              :             | super::storage_layer::layer::DownloadError::DownloadRequired
    5314              :             | super::storage_layer::layer::DownloadError::NotFile(_)
    5315              :             | super::storage_layer::layer::DownloadError::DownloadFailed
    5316              :             | super::storage_layer::layer::DownloadError::PreStatFailed(_) => {
    5317            0 :                 CompactionError::Other(anyhow::anyhow!(e))
    5318              :             }
    5319              :             #[cfg(test)]
    5320              :             super::storage_layer::layer::DownloadError::Failpoint(_) => {
    5321            0 :                 CompactionError::Other(anyhow::anyhow!(e))
    5322              :             }
    5323              :         }
    5324            0 :     }
    5325              : }
    5326              : 
    5327              : impl From<layer_manager::Shutdown> for CompactionError {
    5328            0 :     fn from(_: layer_manager::Shutdown) -> Self {
    5329            0 :         CompactionError::ShuttingDown
    5330            0 :     }
    5331              : }
    5332              : 
    5333              : #[serde_as]
    5334          644 : #[derive(serde::Serialize)]
    5335              : struct RecordedDuration(#[serde_as(as = "serde_with::DurationMicroSeconds")] Duration);
    5336              : 
    5337              : #[derive(Default)]
    5338              : enum DurationRecorder {
    5339              :     #[default]
    5340              :     NotStarted,
    5341              :     Recorded(RecordedDuration, tokio::time::Instant),
    5342              : }
    5343              : 
    5344              : impl DurationRecorder {
    5345         1228 :     fn till_now(&self) -> DurationRecorder {
    5346         1228 :         match self {
    5347              :             DurationRecorder::NotStarted => {
    5348            0 :                 panic!("must only call on recorded measurements")
    5349              :             }
    5350         1228 :             DurationRecorder::Recorded(_, ended) => {
    5351         1228 :                 let now = tokio::time::Instant::now();
    5352         1228 :                 DurationRecorder::Recorded(RecordedDuration(now - *ended), now)
    5353         1228 :             }
    5354         1228 :         }
    5355         1228 :     }
    5356          644 :     fn into_recorded(self) -> Option<RecordedDuration> {
    5357          644 :         match self {
    5358            0 :             DurationRecorder::NotStarted => None,
    5359          644 :             DurationRecorder::Recorded(recorded, _) => Some(recorded),
    5360              :         }
    5361          644 :     }
    5362              : }
    5363              : 
    5364              : /// Descriptor for a delta layer used in testing infra. The start/end key/lsn range of the
    5365              : /// delta layer might be different from the min/max key/lsn in the delta layer. Therefore,
    5366              : /// the layer descriptor requires the user to provide the ranges, which should cover all
    5367              : /// keys specified in the `data` field.
    5368              : #[cfg(test)]
    5369              : #[derive(Clone)]
    5370              : pub struct DeltaLayerTestDesc {
    5371              :     pub lsn_range: Range<Lsn>,
    5372              :     pub key_range: Range<Key>,
    5373              :     pub data: Vec<(Key, Lsn, Value)>,
    5374              : }
    5375              : 
    5376              : #[cfg(test)]
    5377              : impl DeltaLayerTestDesc {
    5378            9 :     pub fn new(lsn_range: Range<Lsn>, key_range: Range<Key>, data: Vec<(Key, Lsn, Value)>) -> Self {
    5379            9 :         Self {
    5380            9 :             lsn_range,
    5381            9 :             key_range,
    5382            9 :             data,
    5383            9 :         }
    5384            9 :     }
    5385              : 
    5386          178 :     pub fn new_with_inferred_key_range(
    5387          178 :         lsn_range: Range<Lsn>,
    5388          178 :         data: Vec<(Key, Lsn, Value)>,
    5389          178 :     ) -> Self {
    5390          442 :         let key_min = data.iter().map(|(key, _, _)| key).min().unwrap();
    5391          442 :         let key_max = data.iter().map(|(key, _, _)| key).max().unwrap();
    5392          178 :         Self {
    5393          178 :             key_range: (*key_min)..(key_max.next()),
    5394          178 :             lsn_range,
    5395          178 :             data,
    5396          178 :         }
    5397          178 :     }
    5398              : 
    5399           25 :     pub(crate) fn layer_name(&self) -> LayerName {
    5400           25 :         LayerName::Delta(super::storage_layer::DeltaLayerName {
    5401           25 :             key_range: self.key_range.clone(),
    5402           25 :             lsn_range: self.lsn_range.clone(),
    5403           25 :         })
    5404           25 :     }
    5405              : }
    5406              : 
    5407              : impl Timeline {
    5408           92 :     async fn finish_compact_batch(
    5409           92 :         self: &Arc<Self>,
    5410           92 :         new_deltas: &[ResidentLayer],
    5411           92 :         new_images: &[ResidentLayer],
    5412           92 :         layers_to_remove: &[Layer],
    5413           92 :     ) -> Result<(), CompactionError> {
    5414           92 :         let mut guard = tokio::select! {
    5415           92 :             guard = self.layers.write() => guard,
    5416           92 :             _ = self.cancel.cancelled() => {
    5417            0 :                 return Err(CompactionError::ShuttingDown);
    5418              :             }
    5419              :         };
    5420              : 
    5421           92 :         let mut duplicated_layers = HashSet::new();
    5422           92 : 
    5423           92 :         let mut insert_layers = Vec::with_capacity(new_deltas.len());
    5424              : 
    5425          744 :         for l in new_deltas {
    5426          652 :             if guard.contains(l.as_ref()) {
    5427              :                 // expected in tests
    5428            0 :                 tracing::error!(layer=%l, "duplicated L1 layer");
    5429              : 
    5430              :                 // good ways to cause a duplicate: we repeatedly error after taking the writelock
    5431              :                 // `guard`  on self.layers. as of writing this, there are no error returns except
    5432              :                 // for compact_level0_phase1 creating an L0, which does not happen in practice
    5433              :                 // because we have not implemented L0 => L0 compaction.
    5434            0 :                 duplicated_layers.insert(l.layer_desc().key());
    5435          652 :             } else if LayerMap::is_l0(&l.layer_desc().key_range, l.layer_desc().is_delta) {
    5436            0 :                 return Err(CompactionError::Other(anyhow::anyhow!("compaction generates a L0 layer file as output, which will cause infinite compaction.")));
    5437          652 :             } else {
    5438          652 :                 insert_layers.push(l.clone());
    5439          652 :             }
    5440              :         }
    5441              : 
    5442              :         // only remove those inputs which were not outputs
    5443           92 :         let remove_layers: Vec<Layer> = layers_to_remove
    5444           92 :             .iter()
    5445          804 :             .filter(|l| !duplicated_layers.contains(&l.layer_desc().key()))
    5446           92 :             .cloned()
    5447           92 :             .collect();
    5448           92 : 
    5449           92 :         if !new_images.is_empty() {
    5450            0 :             guard
    5451            0 :                 .open_mut()?
    5452            0 :                 .track_new_image_layers(new_images, &self.metrics);
    5453           92 :         }
    5454              : 
    5455           92 :         guard
    5456           92 :             .open_mut()?
    5457           92 :             .finish_compact_l0(&remove_layers, &insert_layers, &self.metrics);
    5458           92 : 
    5459           92 :         self.remote_client
    5460           92 :             .schedule_compaction_update(&remove_layers, new_deltas)?;
    5461              : 
    5462           92 :         drop_wlock(guard);
    5463           92 : 
    5464           92 :         Ok(())
    5465           92 :     }
    5466              : 
    5467            0 :     async fn rewrite_layers(
    5468            0 :         self: &Arc<Self>,
    5469            0 :         mut replace_layers: Vec<(Layer, ResidentLayer)>,
    5470            0 :         mut drop_layers: Vec<Layer>,
    5471            0 :     ) -> Result<(), CompactionError> {
    5472            0 :         let mut guard = self.layers.write().await;
    5473              : 
    5474              :         // Trim our lists in case our caller (compaction) raced with someone else (GC) removing layers: we want
    5475              :         // to avoid double-removing, and avoid rewriting something that was removed.
    5476            0 :         replace_layers.retain(|(l, _)| guard.contains(l));
    5477            0 :         drop_layers.retain(|l| guard.contains(l));
    5478            0 : 
    5479            0 :         guard
    5480            0 :             .open_mut()?
    5481            0 :             .rewrite_layers(&replace_layers, &drop_layers, &self.metrics);
    5482            0 : 
    5483            0 :         let upload_layers: Vec<_> = replace_layers.into_iter().map(|r| r.1).collect();
    5484            0 : 
    5485            0 :         self.remote_client
    5486            0 :             .schedule_compaction_update(&drop_layers, &upload_layers)?;
    5487              : 
    5488            0 :         Ok(())
    5489            0 :     }
    5490              : 
    5491              :     /// Schedules the uploads of the given image layers
    5492          768 :     fn upload_new_image_layers(
    5493          768 :         self: &Arc<Self>,
    5494          768 :         new_images: impl IntoIterator<Item = ResidentLayer>,
    5495          768 :     ) -> Result<(), super::upload_queue::NotInitialized> {
    5496          808 :         for layer in new_images {
    5497           40 :             self.remote_client.schedule_layer_file_upload(layer)?;
    5498              :         }
    5499              :         // should any new image layer been created, not uploading index_part will
    5500              :         // result in a mismatch between remote_physical_size and layermap calculated
    5501              :         // size, which will fail some tests, but should not be an issue otherwise.
    5502          768 :         self.remote_client
    5503          768 :             .schedule_index_upload_for_file_changes()?;
    5504          768 :         Ok(())
    5505          768 :     }
    5506              : 
    5507            0 :     async fn find_gc_time_cutoff(
    5508            0 :         &self,
    5509            0 :         now: SystemTime,
    5510            0 :         pitr: Duration,
    5511            0 :         cancel: &CancellationToken,
    5512            0 :         ctx: &RequestContext,
    5513            0 :     ) -> Result<Option<Lsn>, PageReconstructError> {
    5514            0 :         debug_assert_current_span_has_tenant_and_timeline_id();
    5515            0 :         if self.shard_identity.is_shard_zero() {
    5516              :             // Shard Zero has SLRU data and can calculate the PITR time -> LSN mapping itself
    5517            0 :             let time_range = if pitr == Duration::ZERO {
    5518            0 :                 humantime::parse_duration(DEFAULT_PITR_INTERVAL).expect("constant is invalid")
    5519              :             } else {
    5520            0 :                 pitr
    5521              :             };
    5522              : 
    5523              :             // If PITR is so large or `now` is so small that this underflows, we will retain no history (highly unexpected case)
    5524            0 :             let time_cutoff = now.checked_sub(time_range).unwrap_or(now);
    5525            0 :             let timestamp = to_pg_timestamp(time_cutoff);
    5526              : 
    5527            0 :             let time_cutoff = match self.find_lsn_for_timestamp(timestamp, cancel, ctx).await? {
    5528            0 :                 LsnForTimestamp::Present(lsn) => Some(lsn),
    5529            0 :                 LsnForTimestamp::Future(lsn) => {
    5530            0 :                     // The timestamp is in the future. That sounds impossible,
    5531            0 :                     // but what it really means is that there hasn't been
    5532            0 :                     // any commits since the cutoff timestamp.
    5533            0 :                     //
    5534            0 :                     // In this case we should use the LSN of the most recent commit,
    5535            0 :                     // which is implicitly the last LSN in the log.
    5536            0 :                     debug!("future({})", lsn);
    5537            0 :                     Some(self.get_last_record_lsn())
    5538              :                 }
    5539            0 :                 LsnForTimestamp::Past(lsn) => {
    5540            0 :                     debug!("past({})", lsn);
    5541            0 :                     None
    5542              :                 }
    5543            0 :                 LsnForTimestamp::NoData(lsn) => {
    5544            0 :                     debug!("nodata({})", lsn);
    5545            0 :                     None
    5546              :                 }
    5547              :             };
    5548            0 :             Ok(time_cutoff)
    5549              :         } else {
    5550              :             // Shards other than shard zero cannot do timestamp->lsn lookups, and must instead learn their GC cutoff
    5551              :             // from shard zero's index.  The index doesn't explicitly tell us the time cutoff, but we may assume that
    5552              :             // the point up to which shard zero's last_gc_cutoff has advanced will either be the time cutoff, or a
    5553              :             // space cutoff that we would also have respected ourselves.
    5554            0 :             match self
    5555            0 :                 .remote_client
    5556            0 :                 .download_foreign_index(ShardNumber(0), cancel)
    5557            0 :                 .await
    5558              :             {
    5559            0 :                 Ok((index_part, index_generation, _index_mtime)) => {
    5560            0 :                     tracing::info!("GC loaded shard zero metadata (gen {index_generation:?}): latest_gc_cutoff_lsn: {}",
    5561            0 :                         index_part.metadata.latest_gc_cutoff_lsn());
    5562            0 :                     Ok(Some(index_part.metadata.latest_gc_cutoff_lsn()))
    5563              :                 }
    5564              :                 Err(DownloadError::NotFound) => {
    5565              :                     // This is unexpected, because during timeline creations shard zero persists to remote
    5566              :                     // storage before other shards are called, and during timeline deletion non-zeroth shards are
    5567              :                     // deleted before the zeroth one.  However, it should be harmless: if we somehow end up in this
    5568              :                     // state, then shard zero should _eventually_ write an index when it GCs.
    5569            0 :                     tracing::warn!("GC couldn't find shard zero's index for timeline");
    5570            0 :                     Ok(None)
    5571              :                 }
    5572            0 :                 Err(e) => {
    5573            0 :                     // TODO: this function should return a different error type than page reconstruct error
    5574            0 :                     Err(PageReconstructError::Other(anyhow::anyhow!(e)))
    5575              :                 }
    5576              :             }
    5577              : 
    5578              :             // TODO: after reading shard zero's GC cutoff, we should validate its generation with the storage
    5579              :             // controller.  Otherwise, it is possible that we see the GC cutoff go backwards while shard zero
    5580              :             // is going through a migration if we read the old location's index and it has GC'd ahead of the
    5581              :             // new location.  This is legal in principle, but problematic in practice because it might result
    5582              :             // in a timeline creation succeeding on shard zero ('s new location) but then failing on other shards
    5583              :             // because they have GC'd past the branch point.
    5584              :         }
    5585            0 :     }
    5586              : 
    5587              :     /// Find the Lsns above which layer files need to be retained on
    5588              :     /// garbage collection.
    5589              :     ///
    5590              :     /// We calculate two cutoffs, one based on time and one based on WAL size.  `pitr`
    5591              :     /// controls the time cutoff (or ZERO to disable time-based retention), and `space_cutoff` controls
    5592              :     /// the space-based retention.
    5593              :     ///
    5594              :     /// This function doesn't simply to calculate time & space based retention: it treats time-based
    5595              :     /// retention as authoritative if enabled, and falls back to space-based retention if calculating
    5596              :     /// the LSN for a time point isn't possible.  Therefore the GcCutoffs::horizon in the response might
    5597              :     /// be different to the `space_cutoff` input.  Callers should treat the min() of the two cutoffs
    5598              :     /// in the response as the GC cutoff point for the timeline.
    5599              :     #[instrument(skip_all, fields(timeline_id=%self.timeline_id))]
    5600              :     pub(super) async fn find_gc_cutoffs(
    5601              :         &self,
    5602              :         now: SystemTime,
    5603              :         space_cutoff: Lsn,
    5604              :         pitr: Duration,
    5605              :         cancel: &CancellationToken,
    5606              :         ctx: &RequestContext,
    5607              :     ) -> Result<GcCutoffs, PageReconstructError> {
    5608              :         let _timer = self
    5609              :             .metrics
    5610              :             .find_gc_cutoffs_histo
    5611              :             .start_timer()
    5612              :             .record_on_drop();
    5613              : 
    5614              :         pausable_failpoint!("Timeline::find_gc_cutoffs-pausable");
    5615              : 
    5616              :         if cfg!(test) {
    5617              :             // Unit tests which specify zero PITR interval expect to avoid doing any I/O for timestamp lookup
    5618              :             if pitr == Duration::ZERO {
    5619              :                 return Ok(GcCutoffs {
    5620              :                     time: self.get_last_record_lsn(),
    5621              :                     space: space_cutoff,
    5622              :                 });
    5623              :             }
    5624              :         }
    5625              : 
    5626              :         // Calculate a time-based limit on how much to retain:
    5627              :         // - if PITR interval is set, then this is our cutoff.
    5628              :         // - if PITR interval is not set, then we do a lookup
    5629              :         //   based on DEFAULT_PITR_INTERVAL, so that size-based retention does not result in keeping history around permanently on idle databases.
    5630              :         let time_cutoff = self.find_gc_time_cutoff(now, pitr, cancel, ctx).await?;
    5631              : 
    5632              :         Ok(match (pitr, time_cutoff) {
    5633              :             (Duration::ZERO, Some(time_cutoff)) => {
    5634              :                 // PITR is not set. Retain the size-based limit, or the default time retention,
    5635              :                 // whichever requires less data.
    5636              :                 GcCutoffs {
    5637              :                     time: self.get_last_record_lsn(),
    5638              :                     space: std::cmp::max(time_cutoff, space_cutoff),
    5639              :                 }
    5640              :             }
    5641              :             (Duration::ZERO, None) => {
    5642              :                 // PITR is not set, and time lookup failed
    5643              :                 GcCutoffs {
    5644              :                     time: self.get_last_record_lsn(),
    5645              :                     space: space_cutoff,
    5646              :                 }
    5647              :             }
    5648              :             (_, None) => {
    5649              :                 // PITR interval is set & we didn't look up a timestamp successfully.  Conservatively assume PITR
    5650              :                 // cannot advance beyond what was already GC'd, and respect space-based retention
    5651              :                 GcCutoffs {
    5652              :                     time: *self.get_applied_gc_cutoff_lsn(),
    5653              :                     space: space_cutoff,
    5654              :                 }
    5655              :             }
    5656              :             (_, Some(time_cutoff)) => {
    5657              :                 // PITR interval is set and we looked up timestamp successfully.  Ignore
    5658              :                 // size based retention and make time cutoff authoritative
    5659              :                 GcCutoffs {
    5660              :                     time: time_cutoff,
    5661              :                     space: time_cutoff,
    5662              :                 }
    5663              :             }
    5664              :         })
    5665              :     }
    5666              : 
    5667              :     /// Garbage collect layer files on a timeline that are no longer needed.
    5668              :     ///
    5669              :     /// Currently, we don't make any attempt at removing unneeded page versions
    5670              :     /// within a layer file. We can only remove the whole file if it's fully
    5671              :     /// obsolete.
    5672            8 :     pub(super) async fn gc(&self) -> Result<GcResult, GcError> {
    5673              :         // this is most likely the background tasks, but it might be the spawned task from
    5674              :         // immediate_gc
    5675            8 :         let _g = tokio::select! {
    5676            8 :             guard = self.gc_lock.lock() => guard,
    5677            8 :             _ = self.cancel.cancelled() => return Ok(GcResult::default()),
    5678              :         };
    5679            8 :         let timer = self.metrics.garbage_collect_histo.start_timer();
    5680            8 : 
    5681            8 :         fail_point!("before-timeline-gc");
    5682            8 : 
    5683            8 :         // Is the timeline being deleted?
    5684            8 :         if self.is_stopping() {
    5685            0 :             return Err(GcError::TimelineCancelled);
    5686            8 :         }
    5687            8 : 
    5688            8 :         let (space_cutoff, time_cutoff, retain_lsns, max_lsn_with_valid_lease) = {
    5689            8 :             let gc_info = self.gc_info.read().unwrap();
    5690            8 : 
    5691            8 :             let space_cutoff = min(gc_info.cutoffs.space, self.get_disk_consistent_lsn());
    5692            8 :             let time_cutoff = gc_info.cutoffs.time;
    5693            8 :             let retain_lsns = gc_info
    5694            8 :                 .retain_lsns
    5695            8 :                 .iter()
    5696            8 :                 .map(|(lsn, _child_id, _is_offloaded)| *lsn)
    5697            8 :                 .collect();
    5698            8 : 
    5699            8 :             // Gets the maximum LSN that holds the valid lease.
    5700            8 :             //
    5701            8 :             // Caveat: `refresh_gc_info` is in charged of updating the lease map.
    5702            8 :             // Here, we do not check for stale leases again.
    5703            8 :             let max_lsn_with_valid_lease = gc_info.leases.last_key_value().map(|(lsn, _)| *lsn);
    5704            8 : 
    5705            8 :             (
    5706            8 :                 space_cutoff,
    5707            8 :                 time_cutoff,
    5708            8 :                 retain_lsns,
    5709            8 :                 max_lsn_with_valid_lease,
    5710            8 :             )
    5711            8 :         };
    5712            8 : 
    5713            8 :         let mut new_gc_cutoff = Lsn::min(space_cutoff, time_cutoff);
    5714            8 :         let standby_horizon = self.standby_horizon.load();
    5715            8 :         // Hold GC for the standby, but as a safety guard do it only within some
    5716            8 :         // reasonable lag.
    5717            8 :         if standby_horizon != Lsn::INVALID {
    5718            0 :             if let Some(standby_lag) = new_gc_cutoff.checked_sub(standby_horizon) {
    5719              :                 const MAX_ALLOWED_STANDBY_LAG: u64 = 10u64 << 30; // 10 GB
    5720            0 :                 if standby_lag.0 < MAX_ALLOWED_STANDBY_LAG {
    5721            0 :                     new_gc_cutoff = Lsn::min(standby_horizon, new_gc_cutoff);
    5722            0 :                     trace!("holding off GC for standby apply LSN {}", standby_horizon);
    5723              :                 } else {
    5724            0 :                     warn!(
    5725            0 :                         "standby is lagging for more than {}MB, not holding gc for it",
    5726            0 :                         MAX_ALLOWED_STANDBY_LAG / 1024 / 1024
    5727              :                     )
    5728              :                 }
    5729            0 :             }
    5730            8 :         }
    5731              : 
    5732              :         // Reset standby horizon to ignore it if it is not updated till next GC.
    5733              :         // It is an easy way to unset it when standby disappears without adding
    5734              :         // more conf options.
    5735            8 :         self.standby_horizon.store(Lsn::INVALID);
    5736            8 :         self.metrics
    5737            8 :             .standby_horizon_gauge
    5738            8 :             .set(Lsn::INVALID.0 as i64);
    5739              : 
    5740            8 :         let res = self
    5741            8 :             .gc_timeline(
    5742            8 :                 space_cutoff,
    5743            8 :                 time_cutoff,
    5744            8 :                 retain_lsns,
    5745            8 :                 max_lsn_with_valid_lease,
    5746            8 :                 new_gc_cutoff,
    5747            8 :             )
    5748            8 :             .instrument(
    5749            8 :                 info_span!("gc_timeline", timeline_id = %self.timeline_id, cutoff = %new_gc_cutoff),
    5750              :             )
    5751            8 :             .await?;
    5752              : 
    5753              :         // only record successes
    5754            8 :         timer.stop_and_record();
    5755            8 : 
    5756            8 :         Ok(res)
    5757            8 :     }
    5758              : 
    5759            8 :     async fn gc_timeline(
    5760            8 :         &self,
    5761            8 :         space_cutoff: Lsn,
    5762            8 :         time_cutoff: Lsn,
    5763            8 :         retain_lsns: Vec<Lsn>,
    5764            8 :         max_lsn_with_valid_lease: Option<Lsn>,
    5765            8 :         new_gc_cutoff: Lsn,
    5766            8 :     ) -> Result<GcResult, GcError> {
    5767            8 :         // FIXME: if there is an ongoing detach_from_ancestor, we should just skip gc
    5768            8 : 
    5769            8 :         let now = SystemTime::now();
    5770            8 :         let mut result: GcResult = GcResult::default();
    5771            8 : 
    5772            8 :         // Nothing to GC. Return early.
    5773            8 :         let latest_gc_cutoff = *self.get_applied_gc_cutoff_lsn();
    5774            8 :         if latest_gc_cutoff >= new_gc_cutoff {
    5775            0 :             info!(
    5776            0 :                 "Nothing to GC: new_gc_cutoff_lsn {new_gc_cutoff}, latest_gc_cutoff_lsn {latest_gc_cutoff}",
    5777              :             );
    5778            0 :             return Ok(result);
    5779            8 :         }
    5780              : 
    5781              :         // We need to ensure that no one tries to read page versions or create
    5782              :         // branches at a point before latest_gc_cutoff_lsn. See branch_timeline()
    5783              :         // for details. This will block until the old value is no longer in use.
    5784              :         //
    5785              :         // The GC cutoff should only ever move forwards.
    5786            8 :         let waitlist = {
    5787            8 :             let write_guard = self.applied_gc_cutoff_lsn.lock_for_write();
    5788            8 :             if *write_guard > new_gc_cutoff {
    5789            0 :                 return Err(GcError::BadLsn {
    5790            0 :                     why: format!(
    5791            0 :                         "Cannot move GC cutoff LSN backwards (was {}, new {})",
    5792            0 :                         *write_guard, new_gc_cutoff
    5793            0 :                     ),
    5794            0 :                 });
    5795            8 :             }
    5796            8 : 
    5797            8 :             write_guard.store_and_unlock(new_gc_cutoff)
    5798            8 :         };
    5799            8 :         waitlist.wait().await;
    5800              : 
    5801            8 :         info!("GC starting");
    5802              : 
    5803            8 :         debug!("retain_lsns: {:?}", retain_lsns);
    5804              : 
    5805            8 :         let mut layers_to_remove = Vec::new();
    5806              : 
    5807              :         // Scan all layers in the timeline (remote or on-disk).
    5808              :         //
    5809              :         // Garbage collect the layer if all conditions are satisfied:
    5810              :         // 1. it is older than cutoff LSN;
    5811              :         // 2. it is older than PITR interval;
    5812              :         // 3. it doesn't need to be retained for 'retain_lsns';
    5813              :         // 4. it does not need to be kept for LSNs holding valid leases.
    5814              :         // 5. newer on-disk image layers cover the layer's whole key range
    5815              :         //
    5816              :         // TODO holding a write lock is too agressive and avoidable
    5817            8 :         let mut guard = self.layers.write().await;
    5818            8 :         let layers = guard.layer_map()?;
    5819           48 :         'outer: for l in layers.iter_historic_layers() {
    5820           48 :             result.layers_total += 1;
    5821           48 : 
    5822           48 :             // 1. Is it newer than GC horizon cutoff point?
    5823           48 :             if l.get_lsn_range().end > space_cutoff {
    5824            4 :                 info!(
    5825            0 :                     "keeping {} because it's newer than space_cutoff {}",
    5826            0 :                     l.layer_name(),
    5827              :                     space_cutoff,
    5828              :                 );
    5829            4 :                 result.layers_needed_by_cutoff += 1;
    5830            4 :                 continue 'outer;
    5831           44 :             }
    5832           44 : 
    5833           44 :             // 2. It is newer than PiTR cutoff point?
    5834           44 :             if l.get_lsn_range().end > time_cutoff {
    5835            0 :                 info!(
    5836            0 :                     "keeping {} because it's newer than time_cutoff {}",
    5837            0 :                     l.layer_name(),
    5838              :                     time_cutoff,
    5839              :                 );
    5840            0 :                 result.layers_needed_by_pitr += 1;
    5841            0 :                 continue 'outer;
    5842           44 :             }
    5843              : 
    5844              :             // 3. Is it needed by a child branch?
    5845              :             // NOTE With that we would keep data that
    5846              :             // might be referenced by child branches forever.
    5847              :             // We can track this in child timeline GC and delete parent layers when
    5848              :             // they are no longer needed. This might be complicated with long inheritance chains.
    5849              :             //
    5850              :             // TODO Vec is not a great choice for `retain_lsns`
    5851           44 :             for retain_lsn in &retain_lsns {
    5852              :                 // start_lsn is inclusive
    5853            0 :                 if &l.get_lsn_range().start <= retain_lsn {
    5854            0 :                     info!(
    5855            0 :                         "keeping {} because it's still might be referenced by child branch forked at {} is_dropped: xx is_incremental: {}",
    5856            0 :                         l.layer_name(),
    5857            0 :                         retain_lsn,
    5858            0 :                         l.is_incremental(),
    5859              :                     );
    5860            0 :                     result.layers_needed_by_branches += 1;
    5861            0 :                     continue 'outer;
    5862            0 :                 }
    5863              :             }
    5864              : 
    5865              :             // 4. Is there a valid lease that requires us to keep this layer?
    5866           44 :             if let Some(lsn) = &max_lsn_with_valid_lease {
    5867              :                 // keep if layer start <= any of the lease
    5868           36 :                 if &l.get_lsn_range().start <= lsn {
    5869           28 :                     info!(
    5870            0 :                         "keeping {} because there is a valid lease preventing GC at {}",
    5871            0 :                         l.layer_name(),
    5872              :                         lsn,
    5873              :                     );
    5874           28 :                     result.layers_needed_by_leases += 1;
    5875           28 :                     continue 'outer;
    5876            8 :                 }
    5877            8 :             }
    5878              : 
    5879              :             // 5. Is there a later on-disk layer for this relation?
    5880              :             //
    5881              :             // The end-LSN is exclusive, while disk_consistent_lsn is
    5882              :             // inclusive. For example, if disk_consistent_lsn is 100, it is
    5883              :             // OK for a delta layer to have end LSN 101, but if the end LSN
    5884              :             // is 102, then it might not have been fully flushed to disk
    5885              :             // before crash.
    5886              :             //
    5887              :             // For example, imagine that the following layers exist:
    5888              :             //
    5889              :             // 1000      - image (A)
    5890              :             // 1000-2000 - delta (B)
    5891              :             // 2000      - image (C)
    5892              :             // 2000-3000 - delta (D)
    5893              :             // 3000      - image (E)
    5894              :             //
    5895              :             // If GC horizon is at 2500, we can remove layers A and B, but
    5896              :             // we cannot remove C, even though it's older than 2500, because
    5897              :             // the delta layer 2000-3000 depends on it.
    5898           16 :             if !layers
    5899           16 :                 .image_layer_exists(&l.get_key_range(), &(l.get_lsn_range().end..new_gc_cutoff))
    5900              :             {
    5901           12 :                 info!("keeping {} because it is the latest layer", l.layer_name());
    5902           12 :                 result.layers_not_updated += 1;
    5903           12 :                 continue 'outer;
    5904            4 :             }
    5905            4 : 
    5906            4 :             // We didn't find any reason to keep this file, so remove it.
    5907            4 :             info!(
    5908            0 :                 "garbage collecting {} is_dropped: xx is_incremental: {}",
    5909            0 :                 l.layer_name(),
    5910            0 :                 l.is_incremental(),
    5911              :             );
    5912            4 :             layers_to_remove.push(l);
    5913              :         }
    5914              : 
    5915            8 :         if !layers_to_remove.is_empty() {
    5916              :             // Persist the new GC cutoff value before we actually remove anything.
    5917              :             // This unconditionally schedules also an index_part.json update, even though, we will
    5918              :             // be doing one a bit later with the unlinked gc'd layers.
    5919            4 :             let disk_consistent_lsn = self.disk_consistent_lsn.load();
    5920            4 :             self.schedule_uploads(disk_consistent_lsn, None)
    5921            4 :                 .map_err(|e| {
    5922            0 :                     if self.cancel.is_cancelled() {
    5923            0 :                         GcError::TimelineCancelled
    5924              :                     } else {
    5925            0 :                         GcError::Remote(e)
    5926              :                     }
    5927            4 :                 })?;
    5928              : 
    5929            4 :             let gc_layers = layers_to_remove
    5930            4 :                 .iter()
    5931            4 :                 .map(|x| guard.get_from_desc(x))
    5932            4 :                 .collect::<Vec<Layer>>();
    5933            4 : 
    5934            4 :             result.layers_removed = gc_layers.len() as u64;
    5935            4 : 
    5936            4 :             self.remote_client.schedule_gc_update(&gc_layers)?;
    5937              : 
    5938            4 :             guard.open_mut()?.finish_gc_timeline(&gc_layers);
    5939            4 : 
    5940            4 :             #[cfg(feature = "testing")]
    5941            4 :             {
    5942            4 :                 result.doomed_layers = gc_layers;
    5943            4 :             }
    5944            4 :         }
    5945              : 
    5946            8 :         info!(
    5947            0 :             "GC completed removing {} layers, cutoff {}",
    5948              :             result.layers_removed, new_gc_cutoff
    5949              :         );
    5950              : 
    5951            8 :         result.elapsed = now.elapsed().unwrap_or(Duration::ZERO);
    5952            8 :         Ok(result)
    5953            8 :     }
    5954              : 
    5955              :     /// Reconstruct a value, using the given base image and WAL records in 'data'.
    5956      1332025 :     async fn reconstruct_value(
    5957      1332025 :         &self,
    5958      1332025 :         key: Key,
    5959      1332025 :         request_lsn: Lsn,
    5960      1332025 :         mut data: ValueReconstructState,
    5961      1332025 :     ) -> Result<Bytes, PageReconstructError> {
    5962      1332025 :         // Perform WAL redo if needed
    5963      1332025 :         data.records.reverse();
    5964      1332025 : 
    5965      1332025 :         // If we have a page image, and no WAL, we're all set
    5966      1332025 :         if data.records.is_empty() {
    5967      1330389 :             if let Some((img_lsn, img)) = &data.img {
    5968      1330389 :                 trace!(
    5969            0 :                     "found page image for key {} at {}, no WAL redo required, req LSN {}",
    5970              :                     key,
    5971              :                     img_lsn,
    5972              :                     request_lsn,
    5973              :                 );
    5974      1330389 :                 Ok(img.clone())
    5975              :             } else {
    5976            0 :                 Err(PageReconstructError::from(anyhow!(
    5977            0 :                     "base image for {key} at {request_lsn} not found"
    5978            0 :                 )))
    5979              :             }
    5980              :         } else {
    5981              :             // We need to do WAL redo.
    5982              :             //
    5983              :             // If we don't have a base image, then the oldest WAL record better initialize
    5984              :             // the page
    5985         1636 :             if data.img.is_none() && !data.records.first().unwrap().1.will_init() {
    5986            0 :                 Err(PageReconstructError::from(anyhow!(
    5987            0 :                     "Base image for {} at {} not found, but got {} WAL records",
    5988            0 :                     key,
    5989            0 :                     request_lsn,
    5990            0 :                     data.records.len()
    5991            0 :                 )))
    5992              :             } else {
    5993         1636 :                 if data.img.is_some() {
    5994         1504 :                     trace!(
    5995            0 :                         "found {} WAL records and a base image for {} at {}, performing WAL redo",
    5996            0 :                         data.records.len(),
    5997              :                         key,
    5998              :                         request_lsn
    5999              :                     );
    6000              :                 } else {
    6001          132 :                     trace!("found {} WAL records that will init the page for {} at {}, performing WAL redo", data.records.len(), key, request_lsn);
    6002              :                 };
    6003         1636 :                 let res = self
    6004         1636 :                     .walredo_mgr
    6005         1636 :                     .as_ref()
    6006         1636 :                     .context("timeline has no walredo manager")
    6007         1636 :                     .map_err(PageReconstructError::WalRedo)?
    6008         1636 :                     .request_redo(key, request_lsn, data.img, data.records, self.pg_version)
    6009         1636 :                     .await;
    6010         1636 :                 let img = match res {
    6011         1636 :                     Ok(img) => img,
    6012            0 :                     Err(walredo::Error::Cancelled) => return Err(PageReconstructError::Cancelled),
    6013            0 :                     Err(walredo::Error::Other(err)) => {
    6014            0 :                         critical!("walredo failure during page reconstruction: {err:?}");
    6015            0 :                         return Err(PageReconstructError::WalRedo(
    6016            0 :                             err.context("reconstruct a page image"),
    6017            0 :                         ));
    6018              :                     }
    6019              :                 };
    6020         1636 :                 Ok(img)
    6021              :             }
    6022              :         }
    6023      1332025 :     }
    6024              : 
    6025            0 :     pub(crate) async fn spawn_download_all_remote_layers(
    6026            0 :         self: Arc<Self>,
    6027            0 :         request: DownloadRemoteLayersTaskSpawnRequest,
    6028            0 :     ) -> Result<DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskInfo> {
    6029              :         use pageserver_api::models::DownloadRemoteLayersTaskState;
    6030              : 
    6031              :         // this is not really needed anymore; it has tests which really check the return value from
    6032              :         // http api. it would be better not to maintain this anymore.
    6033              : 
    6034            0 :         let mut status_guard = self.download_all_remote_layers_task_info.write().unwrap();
    6035            0 :         if let Some(st) = &*status_guard {
    6036            0 :             match &st.state {
    6037              :                 DownloadRemoteLayersTaskState::Running => {
    6038            0 :                     return Err(st.clone());
    6039              :                 }
    6040              :                 DownloadRemoteLayersTaskState::ShutDown
    6041            0 :                 | DownloadRemoteLayersTaskState::Completed => {
    6042            0 :                     *status_guard = None;
    6043            0 :                 }
    6044              :             }
    6045            0 :         }
    6046              : 
    6047            0 :         let self_clone = Arc::clone(&self);
    6048            0 :         let task_id = task_mgr::spawn(
    6049            0 :             task_mgr::BACKGROUND_RUNTIME.handle(),
    6050            0 :             task_mgr::TaskKind::DownloadAllRemoteLayers,
    6051            0 :             self.tenant_shard_id,
    6052            0 :             Some(self.timeline_id),
    6053            0 :             "download all remote layers task",
    6054            0 :             async move {
    6055            0 :                 self_clone.download_all_remote_layers(request).await;
    6056            0 :                 let mut status_guard = self_clone.download_all_remote_layers_task_info.write().unwrap();
    6057            0 :                  match &mut *status_guard {
    6058              :                     None => {
    6059            0 :                         warn!("tasks status is supposed to be Some(), since we are running");
    6060              :                     }
    6061            0 :                     Some(st) => {
    6062            0 :                         let exp_task_id = format!("{}", task_mgr::current_task_id().unwrap());
    6063            0 :                         if st.task_id != exp_task_id {
    6064            0 :                             warn!("task id changed while we were still running, expecting {} but have {}", exp_task_id, st.task_id);
    6065            0 :                         } else {
    6066            0 :                             st.state = DownloadRemoteLayersTaskState::Completed;
    6067            0 :                         }
    6068              :                     }
    6069              :                 };
    6070            0 :                 Ok(())
    6071            0 :             }
    6072            0 :             .instrument(info_span!(parent: None, "download_all_remote_layers", tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))
    6073              :         );
    6074              : 
    6075            0 :         let initial_info = DownloadRemoteLayersTaskInfo {
    6076            0 :             task_id: format!("{task_id}"),
    6077            0 :             state: DownloadRemoteLayersTaskState::Running,
    6078            0 :             total_layer_count: 0,
    6079            0 :             successful_download_count: 0,
    6080            0 :             failed_download_count: 0,
    6081            0 :         };
    6082            0 :         *status_guard = Some(initial_info.clone());
    6083            0 : 
    6084            0 :         Ok(initial_info)
    6085            0 :     }
    6086              : 
    6087            0 :     async fn download_all_remote_layers(
    6088            0 :         self: &Arc<Self>,
    6089            0 :         request: DownloadRemoteLayersTaskSpawnRequest,
    6090            0 :     ) {
    6091              :         use pageserver_api::models::DownloadRemoteLayersTaskState;
    6092              : 
    6093            0 :         let remaining = {
    6094            0 :             let guard = self.layers.read().await;
    6095            0 :             let Ok(lm) = guard.layer_map() else {
    6096              :                 // technically here we could look into iterating accessible layers, but downloading
    6097              :                 // all layers of a shutdown timeline makes no sense regardless.
    6098            0 :                 tracing::info!("attempted to download all layers of shutdown timeline");
    6099            0 :                 return;
    6100              :             };
    6101            0 :             lm.iter_historic_layers()
    6102            0 :                 .map(|desc| guard.get_from_desc(&desc))
    6103            0 :                 .collect::<Vec<_>>()
    6104            0 :         };
    6105            0 :         let total_layer_count = remaining.len();
    6106              : 
    6107              :         macro_rules! lock_status {
    6108              :             ($st:ident) => {
    6109              :                 let mut st = self.download_all_remote_layers_task_info.write().unwrap();
    6110              :                 let st = st
    6111              :                     .as_mut()
    6112              :                     .expect("this function is only called after the task has been spawned");
    6113              :                 assert_eq!(
    6114              :                     st.task_id,
    6115              :                     format!(
    6116              :                         "{}",
    6117              :                         task_mgr::current_task_id().expect("we run inside a task_mgr task")
    6118              :                     )
    6119              :                 );
    6120              :                 let $st = st;
    6121              :             };
    6122              :         }
    6123              : 
    6124              :         {
    6125            0 :             lock_status!(st);
    6126            0 :             st.total_layer_count = total_layer_count as u64;
    6127            0 :         }
    6128            0 : 
    6129            0 :         let mut remaining = remaining.into_iter();
    6130            0 :         let mut have_remaining = true;
    6131            0 :         let mut js = tokio::task::JoinSet::new();
    6132            0 : 
    6133            0 :         let cancel = task_mgr::shutdown_token();
    6134            0 : 
    6135            0 :         let limit = request.max_concurrent_downloads;
    6136              : 
    6137              :         loop {
    6138            0 :             while js.len() < limit.get() && have_remaining && !cancel.is_cancelled() {
    6139            0 :                 let Some(next) = remaining.next() else {
    6140            0 :                     have_remaining = false;
    6141            0 :                     break;
    6142              :                 };
    6143              : 
    6144            0 :                 let span = tracing::info_span!("download", layer = %next);
    6145              : 
    6146            0 :                 js.spawn(
    6147            0 :                     async move {
    6148            0 :                         let res = next.download().await;
    6149            0 :                         (next, res)
    6150            0 :                     }
    6151            0 :                     .instrument(span),
    6152            0 :                 );
    6153            0 :             }
    6154              : 
    6155            0 :             while let Some(res) = js.join_next().await {
    6156            0 :                 match res {
    6157              :                     Ok((_, Ok(_))) => {
    6158            0 :                         lock_status!(st);
    6159            0 :                         st.successful_download_count += 1;
    6160              :                     }
    6161            0 :                     Ok((layer, Err(e))) => {
    6162            0 :                         tracing::error!(%layer, "download failed: {e:#}");
    6163            0 :                         lock_status!(st);
    6164            0 :                         st.failed_download_count += 1;
    6165              :                     }
    6166            0 :                     Err(je) if je.is_cancelled() => unreachable!("not used here"),
    6167            0 :                     Err(je) if je.is_panic() => {
    6168            0 :                         lock_status!(st);
    6169            0 :                         st.failed_download_count += 1;
    6170              :                     }
    6171            0 :                     Err(je) => tracing::warn!("unknown joinerror: {je:?}"),
    6172              :                 }
    6173              :             }
    6174              : 
    6175            0 :             if js.is_empty() && (!have_remaining || cancel.is_cancelled()) {
    6176            0 :                 break;
    6177            0 :             }
    6178              :         }
    6179              : 
    6180              :         {
    6181            0 :             lock_status!(st);
    6182            0 :             st.state = DownloadRemoteLayersTaskState::Completed;
    6183              :         }
    6184            0 :     }
    6185              : 
    6186            0 :     pub(crate) fn get_download_all_remote_layers_task_info(
    6187            0 :         &self,
    6188            0 :     ) -> Option<DownloadRemoteLayersTaskInfo> {
    6189            0 :         self.download_all_remote_layers_task_info
    6190            0 :             .read()
    6191            0 :             .unwrap()
    6192            0 :             .clone()
    6193            0 :     }
    6194              : }
    6195              : 
    6196              : impl Timeline {
    6197              :     /// Returns non-remote layers for eviction.
    6198            0 :     pub(crate) async fn get_local_layers_for_disk_usage_eviction(&self) -> DiskUsageEvictionInfo {
    6199            0 :         let guard = self.layers.read().await;
    6200            0 :         let mut max_layer_size: Option<u64> = None;
    6201            0 : 
    6202            0 :         let resident_layers = guard
    6203            0 :             .likely_resident_layers()
    6204            0 :             .map(|layer| {
    6205            0 :                 let file_size = layer.layer_desc().file_size;
    6206            0 :                 max_layer_size = max_layer_size.map_or(Some(file_size), |m| Some(m.max(file_size)));
    6207            0 : 
    6208            0 :                 let last_activity_ts = layer.latest_activity();
    6209            0 : 
    6210            0 :                 EvictionCandidate {
    6211            0 :                     layer: layer.to_owned().into(),
    6212            0 :                     last_activity_ts,
    6213            0 :                     relative_last_activity: finite_f32::FiniteF32::ZERO,
    6214            0 :                     visibility: layer.visibility(),
    6215            0 :                 }
    6216            0 :             })
    6217            0 :             .collect();
    6218            0 : 
    6219            0 :         DiskUsageEvictionInfo {
    6220            0 :             max_layer_size,
    6221            0 :             resident_layers,
    6222            0 :         }
    6223            0 :     }
    6224              : 
    6225         3761 :     pub(crate) fn get_shard_index(&self) -> ShardIndex {
    6226         3761 :         ShardIndex {
    6227         3761 :             shard_number: self.tenant_shard_id.shard_number,
    6228         3761 :             shard_count: self.tenant_shard_id.shard_count,
    6229         3761 :         }
    6230         3761 :     }
    6231              : 
    6232              :     /// Persistently blocks gc for `Manual` reason.
    6233              :     ///
    6234              :     /// Returns true if no such block existed before, false otherwise.
    6235            0 :     pub(crate) async fn block_gc(&self, tenant: &super::Tenant) -> anyhow::Result<bool> {
    6236              :         use crate::tenant::remote_timeline_client::index::GcBlockingReason;
    6237            0 :         assert_eq!(self.tenant_shard_id, tenant.tenant_shard_id);
    6238            0 :         tenant.gc_block.insert(self, GcBlockingReason::Manual).await
    6239            0 :     }
    6240              : 
    6241              :     /// Persistently unblocks gc for `Manual` reason.
    6242            0 :     pub(crate) async fn unblock_gc(&self, tenant: &super::Tenant) -> anyhow::Result<()> {
    6243              :         use crate::tenant::remote_timeline_client::index::GcBlockingReason;
    6244            0 :         assert_eq!(self.tenant_shard_id, tenant.tenant_shard_id);
    6245            0 :         tenant.gc_block.remove(self, GcBlockingReason::Manual).await
    6246            0 :     }
    6247              : 
    6248              :     #[cfg(test)]
    6249           97 :     pub(super) fn force_advance_lsn(self: &Arc<Timeline>, new_lsn: Lsn) {
    6250           97 :         self.last_record_lsn.advance(new_lsn);
    6251           97 :     }
    6252              : 
    6253              :     #[cfg(test)]
    6254            8 :     pub(super) fn force_set_disk_consistent_lsn(&self, new_value: Lsn) {
    6255            8 :         self.disk_consistent_lsn.store(new_value);
    6256            8 :     }
    6257              : 
    6258              :     /// Force create an image layer and place it into the layer map.
    6259              :     ///
    6260              :     /// DO NOT use this function directly. Use [`Tenant::branch_timeline_test_with_layers`]
    6261              :     /// or [`Tenant::create_test_timeline_with_layers`] to ensure all these layers are
    6262              :     /// placed into the layer map in one run AND be validated.
    6263              :     #[cfg(test)]
    6264          121 :     pub(super) async fn force_create_image_layer(
    6265          121 :         self: &Arc<Timeline>,
    6266          121 :         lsn: Lsn,
    6267          121 :         mut images: Vec<(Key, Bytes)>,
    6268          121 :         check_start_lsn: Option<Lsn>,
    6269          121 :         ctx: &RequestContext,
    6270          121 :     ) -> anyhow::Result<()> {
    6271          121 :         let last_record_lsn = self.get_last_record_lsn();
    6272          121 :         assert!(
    6273          121 :             lsn <= last_record_lsn,
    6274            0 :             "advance last record lsn before inserting a layer, lsn={lsn}, last_record_lsn={last_record_lsn}"
    6275              :         );
    6276          121 :         if let Some(check_start_lsn) = check_start_lsn {
    6277          121 :             assert!(lsn >= check_start_lsn);
    6278            0 :         }
    6279          348 :         images.sort_unstable_by(|(ka, _), (kb, _)| ka.cmp(kb));
    6280          121 :         let min_key = *images.first().map(|(k, _)| k).unwrap();
    6281          121 :         let end_key = images.last().map(|(k, _)| k).unwrap().next();
    6282          121 :         let mut image_layer_writer = ImageLayerWriter::new(
    6283          121 :             self.conf,
    6284          121 :             self.timeline_id,
    6285          121 :             self.tenant_shard_id,
    6286          121 :             &(min_key..end_key),
    6287          121 :             lsn,
    6288          121 :             ctx,
    6289          121 :         )
    6290          121 :         .await?;
    6291          590 :         for (key, img) in images {
    6292          469 :             image_layer_writer.put_image(key, img, ctx).await?;
    6293              :         }
    6294          121 :         let (desc, path) = image_layer_writer.finish(ctx).await?;
    6295          121 :         let image_layer = Layer::finish_creating(self.conf, self, desc, &path)?;
    6296          121 :         info!("force created image layer {}", image_layer.local_path());
    6297              :         {
    6298          121 :             let mut guard = self.layers.write().await;
    6299          121 :             guard
    6300          121 :                 .open_mut()
    6301          121 :                 .unwrap()
    6302          121 :                 .force_insert_layer(image_layer.clone());
    6303          121 :         }
    6304          121 : 
    6305          121 :         // Update remote_timeline_client state to reflect existence of this layer
    6306          121 :         self.remote_client
    6307          121 :             .schedule_layer_file_upload(image_layer)
    6308          121 :             .unwrap();
    6309          121 : 
    6310          121 :         Ok(())
    6311          121 :     }
    6312              : 
    6313              :     /// Force create a delta layer and place it into the layer map.
    6314              :     ///
    6315              :     /// DO NOT use this function directly. Use [`Tenant::branch_timeline_test_with_layers`]
    6316              :     /// or [`Tenant::create_test_timeline_with_layers`] to ensure all these layers are
    6317              :     /// placed into the layer map in one run AND be validated.
    6318              :     #[cfg(test)]
    6319          187 :     pub(super) async fn force_create_delta_layer(
    6320          187 :         self: &Arc<Timeline>,
    6321          187 :         mut deltas: DeltaLayerTestDesc,
    6322          187 :         check_start_lsn: Option<Lsn>,
    6323          187 :         ctx: &RequestContext,
    6324          187 :     ) -> anyhow::Result<()> {
    6325          187 :         let last_record_lsn = self.get_last_record_lsn();
    6326          187 :         deltas
    6327          187 :             .data
    6328          264 :             .sort_unstable_by(|(ka, la, _), (kb, lb, _)| (ka, la).cmp(&(kb, lb)));
    6329          187 :         assert!(deltas.data.first().unwrap().0 >= deltas.key_range.start);
    6330          187 :         assert!(deltas.data.last().unwrap().0 < deltas.key_range.end);
    6331          638 :         for (_, lsn, _) in &deltas.data {
    6332          451 :             assert!(deltas.lsn_range.start <= *lsn && *lsn < deltas.lsn_range.end);
    6333              :         }
    6334          187 :         assert!(
    6335          187 :             deltas.lsn_range.end <= last_record_lsn,
    6336            0 :             "advance last record lsn before inserting a layer, end_lsn={}, last_record_lsn={}",
    6337              :             deltas.lsn_range.end,
    6338              :             last_record_lsn
    6339              :         );
    6340          187 :         if let Some(check_start_lsn) = check_start_lsn {
    6341          187 :             assert!(deltas.lsn_range.start >= check_start_lsn);
    6342            0 :         }
    6343          187 :         let mut delta_layer_writer = DeltaLayerWriter::new(
    6344          187 :             self.conf,
    6345          187 :             self.timeline_id,
    6346          187 :             self.tenant_shard_id,
    6347          187 :             deltas.key_range.start,
    6348          187 :             deltas.lsn_range,
    6349          187 :             ctx,
    6350          187 :         )
    6351          187 :         .await?;
    6352          638 :         for (key, lsn, val) in deltas.data {
    6353          451 :             delta_layer_writer.put_value(key, lsn, val, ctx).await?;
    6354              :         }
    6355          187 :         let (desc, path) = delta_layer_writer.finish(deltas.key_range.end, ctx).await?;
    6356          187 :         let delta_layer = Layer::finish_creating(self.conf, self, desc, &path)?;
    6357          187 :         info!("force created delta layer {}", delta_layer.local_path());
    6358              :         {
    6359          187 :             let mut guard = self.layers.write().await;
    6360          187 :             guard
    6361          187 :                 .open_mut()
    6362          187 :                 .unwrap()
    6363          187 :                 .force_insert_layer(delta_layer.clone());
    6364          187 :         }
    6365          187 : 
    6366          187 :         // Update remote_timeline_client state to reflect existence of this layer
    6367          187 :         self.remote_client
    6368          187 :             .schedule_layer_file_upload(delta_layer)
    6369          187 :             .unwrap();
    6370          187 : 
    6371          187 :         Ok(())
    6372          187 :     }
    6373              : 
    6374              :     /// Return all keys at the LSN in the image layers
    6375              :     #[cfg(test)]
    6376           12 :     pub(crate) async fn inspect_image_layers(
    6377           12 :         self: &Arc<Timeline>,
    6378           12 :         lsn: Lsn,
    6379           12 :         ctx: &RequestContext,
    6380           12 :         io_concurrency: IoConcurrency,
    6381           12 :     ) -> anyhow::Result<Vec<(Key, Bytes)>> {
    6382           12 :         let mut all_data = Vec::new();
    6383           12 :         let guard = self.layers.read().await;
    6384           68 :         for layer in guard.layer_map()?.iter_historic_layers() {
    6385           68 :             if !layer.is_delta() && layer.image_layer_lsn() == lsn {
    6386           12 :                 let layer = guard.get_from_desc(&layer);
    6387           12 :                 let mut reconstruct_data = ValuesReconstructState::new(io_concurrency.clone());
    6388           12 :                 layer
    6389           12 :                     .get_values_reconstruct_data(
    6390           12 :                         KeySpace::single(Key::MIN..Key::MAX),
    6391           12 :                         lsn..Lsn(lsn.0 + 1),
    6392           12 :                         &mut reconstruct_data,
    6393           12 :                         ctx,
    6394           12 :                     )
    6395           12 :                     .await?;
    6396          104 :                 for (k, v) in std::mem::take(&mut reconstruct_data.keys) {
    6397          104 :                     let v = v.collect_pending_ios().await?;
    6398          104 :                     all_data.push((k, v.img.unwrap().1));
    6399              :                 }
    6400           56 :             }
    6401              :         }
    6402           12 :         all_data.sort();
    6403           12 :         Ok(all_data)
    6404           12 :     }
    6405              : 
    6406              :     /// Get all historic layer descriptors in the layer map
    6407              :     #[cfg(test)]
    6408           48 :     pub(crate) async fn inspect_historic_layers(
    6409           48 :         self: &Arc<Timeline>,
    6410           48 :     ) -> anyhow::Result<Vec<super::storage_layer::PersistentLayerKey>> {
    6411           48 :         let mut layers = Vec::new();
    6412           48 :         let guard = self.layers.read().await;
    6413          228 :         for layer in guard.layer_map()?.iter_historic_layers() {
    6414          228 :             layers.push(layer.key());
    6415          228 :         }
    6416           48 :         Ok(layers)
    6417           48 :     }
    6418              : 
    6419              :     #[cfg(test)]
    6420           20 :     pub(crate) fn add_extra_test_dense_keyspace(&self, ks: KeySpace) {
    6421           20 :         let mut keyspace = self.extra_test_dense_keyspace.load().as_ref().clone();
    6422           20 :         keyspace.merge(&ks);
    6423           20 :         self.extra_test_dense_keyspace.store(Arc::new(keyspace));
    6424           20 :     }
    6425              : }
    6426              : 
    6427              : /// Tracking writes ingestion does to a particular in-memory layer.
    6428              : ///
    6429              : /// Cleared upon freezing a layer.
    6430              : pub(crate) struct TimelineWriterState {
    6431              :     open_layer: Arc<InMemoryLayer>,
    6432              :     current_size: u64,
    6433              :     // Previous Lsn which passed through
    6434              :     prev_lsn: Option<Lsn>,
    6435              :     // Largest Lsn which passed through the current writer
    6436              :     max_lsn: Option<Lsn>,
    6437              :     // Cached details of the last freeze. Avoids going trough the atomic/lock on every put.
    6438              :     cached_last_freeze_at: Lsn,
    6439              : }
    6440              : 
    6441              : impl TimelineWriterState {
    6442         2597 :     fn new(open_layer: Arc<InMemoryLayer>, current_size: u64, last_freeze_at: Lsn) -> Self {
    6443         2597 :         Self {
    6444         2597 :             open_layer,
    6445         2597 :             current_size,
    6446         2597 :             prev_lsn: None,
    6447         2597 :             max_lsn: None,
    6448         2597 :             cached_last_freeze_at: last_freeze_at,
    6449         2597 :         }
    6450         2597 :     }
    6451              : }
    6452              : 
    6453              : /// Various functions to mutate the timeline.
    6454              : // TODO Currently, Deref is used to allow easy access to read methods from this trait.
    6455              : // This is probably considered a bad practice in Rust and should be fixed eventually,
    6456              : // but will cause large code changes.
    6457              : pub(crate) struct TimelineWriter<'a> {
    6458              :     tl: &'a Timeline,
    6459              :     write_guard: tokio::sync::MutexGuard<'a, Option<TimelineWriterState>>,
    6460              : }
    6461              : 
    6462              : impl Deref for TimelineWriter<'_> {
    6463              :     type Target = Timeline;
    6464              : 
    6465     19796401 :     fn deref(&self) -> &Self::Target {
    6466     19796401 :         self.tl
    6467     19796401 :     }
    6468              : }
    6469              : 
    6470              : #[derive(PartialEq)]
    6471              : enum OpenLayerAction {
    6472              :     Roll,
    6473              :     Open,
    6474              :     None,
    6475              : }
    6476              : 
    6477              : impl TimelineWriter<'_> {
    6478      9608469 :     async fn handle_open_layer_action(
    6479      9608469 :         &mut self,
    6480      9608469 :         at: Lsn,
    6481      9608469 :         action: OpenLayerAction,
    6482      9608469 :         ctx: &RequestContext,
    6483      9608469 :     ) -> anyhow::Result<&Arc<InMemoryLayer>> {
    6484      9608469 :         match action {
    6485              :             OpenLayerAction::Roll => {
    6486          160 :                 let freeze_at = self.write_guard.as_ref().unwrap().max_lsn.unwrap();
    6487          160 :                 self.roll_layer(freeze_at).await?;
    6488          160 :                 self.open_layer(at, ctx).await?;
    6489              :             }
    6490         2437 :             OpenLayerAction::Open => self.open_layer(at, ctx).await?,
    6491              :             OpenLayerAction::None => {
    6492      9605872 :                 assert!(self.write_guard.is_some());
    6493              :             }
    6494              :         }
    6495              : 
    6496      9608469 :         Ok(&self.write_guard.as_ref().unwrap().open_layer)
    6497      9608469 :     }
    6498              : 
    6499         2597 :     async fn open_layer(&mut self, at: Lsn, ctx: &RequestContext) -> anyhow::Result<()> {
    6500         2597 :         let layer = self
    6501         2597 :             .tl
    6502         2597 :             .get_layer_for_write(at, &self.write_guard, ctx)
    6503         2597 :             .await?;
    6504         2597 :         let initial_size = layer.size().await?;
    6505              : 
    6506         2597 :         let last_freeze_at = self.last_freeze_at.load();
    6507         2597 :         self.write_guard.replace(TimelineWriterState::new(
    6508         2597 :             layer,
    6509         2597 :             initial_size,
    6510         2597 :             last_freeze_at,
    6511         2597 :         ));
    6512         2597 : 
    6513         2597 :         Ok(())
    6514         2597 :     }
    6515              : 
    6516          160 :     async fn roll_layer(&mut self, freeze_at: Lsn) -> Result<(), FlushLayerError> {
    6517          160 :         let current_size = self.write_guard.as_ref().unwrap().current_size;
    6518              : 
    6519              :         // If layer flushes are backpressured due to compaction not keeping up, wait for the flush
    6520              :         // to propagate the backpressure up into WAL ingestion.
    6521          160 :         let l0_count = self
    6522          160 :             .tl
    6523          160 :             .layers
    6524          160 :             .read()
    6525          160 :             .await
    6526          160 :             .layer_map()?
    6527          160 :             .level0_deltas()
    6528          160 :             .len();
    6529          160 :         let wait_thresholds = [
    6530          160 :             self.get_l0_flush_delay_threshold(),
    6531          160 :             self.get_l0_flush_stall_threshold(),
    6532          160 :         ];
    6533          160 :         let wait_threshold = wait_thresholds.into_iter().flatten().min();
    6534              : 
    6535              :         // self.write_guard will be taken by the freezing
    6536          160 :         let flush_id = self
    6537          160 :             .tl
    6538          160 :             .freeze_inmem_layer_at(freeze_at, &mut self.write_guard)
    6539          160 :             .await?;
    6540              : 
    6541          160 :         assert!(self.write_guard.is_none());
    6542              : 
    6543          160 :         if let Some(wait_threshold) = wait_threshold {
    6544            0 :             if l0_count >= wait_threshold {
    6545            0 :                 info!("layer roll waiting for flush due to compaction backpressure at {l0_count} L0 layers");
    6546            0 :                 self.tl.wait_flush_completion(flush_id).await?;
    6547            0 :             }
    6548          160 :         }
    6549              : 
    6550          160 :         if current_size >= self.get_checkpoint_distance() * 2 {
    6551            0 :             warn!("Flushed oversized open layer with size {}", current_size)
    6552          160 :         }
    6553              : 
    6554          160 :         Ok(())
    6555          160 :     }
    6556              : 
    6557      9608469 :     fn get_open_layer_action(&self, lsn: Lsn, new_value_size: u64) -> OpenLayerAction {
    6558      9608469 :         let state = &*self.write_guard;
    6559      9608469 :         let Some(state) = &state else {
    6560         2437 :             return OpenLayerAction::Open;
    6561              :         };
    6562              : 
    6563              :         #[cfg(feature = "testing")]
    6564      9606032 :         if state.cached_last_freeze_at < self.tl.last_freeze_at.load() {
    6565              :             // this check and assertion are not really needed because
    6566              :             // LayerManager::try_freeze_in_memory_layer will always clear out the
    6567              :             // TimelineWriterState if something is frozen. however, we can advance last_freeze_at when there
    6568              :             // is no TimelineWriterState.
    6569            0 :             assert!(
    6570            0 :                 state.open_layer.end_lsn.get().is_some(),
    6571            0 :                 "our open_layer must be outdated"
    6572              :             );
    6573              : 
    6574              :             // this would be a memory leak waiting to happen because the in-memory layer always has
    6575              :             // an index
    6576            0 :             panic!("BUG: TimelineWriterState held on to frozen in-memory layer.");
    6577      9606032 :         }
    6578      9606032 : 
    6579      9606032 :         if state.prev_lsn == Some(lsn) {
    6580              :             // Rolling mid LSN is not supported by [downstream code].
    6581              :             // Hence, only roll at LSN boundaries.
    6582              :             //
    6583              :             // [downstream code]: https://github.com/neondatabase/neon/pull/7993#discussion_r1633345422
    6584           12 :             return OpenLayerAction::None;
    6585      9606020 :         }
    6586      9606020 : 
    6587      9606020 :         if state.current_size == 0 {
    6588              :             // Don't roll empty layers
    6589            0 :             return OpenLayerAction::None;
    6590      9606020 :         }
    6591      9606020 : 
    6592      9606020 :         if self.tl.should_roll(
    6593      9606020 :             state.current_size,
    6594      9606020 :             state.current_size + new_value_size,
    6595      9606020 :             self.get_checkpoint_distance(),
    6596      9606020 :             lsn,
    6597      9606020 :             state.cached_last_freeze_at,
    6598      9606020 :             state.open_layer.get_opened_at(),
    6599      9606020 :         ) {
    6600          160 :             OpenLayerAction::Roll
    6601              :         } else {
    6602      9605860 :             OpenLayerAction::None
    6603              :         }
    6604      9608469 :     }
    6605              : 
    6606              :     /// Put a batch of keys at the specified Lsns.
    6607      9608465 :     pub(crate) async fn put_batch(
    6608      9608465 :         &mut self,
    6609      9608465 :         batch: SerializedValueBatch,
    6610      9608465 :         ctx: &RequestContext,
    6611      9608465 :     ) -> anyhow::Result<()> {
    6612      9608465 :         if !batch.has_data() {
    6613            0 :             return Ok(());
    6614      9608465 :         }
    6615      9608465 : 
    6616      9608465 :         // In debug builds, assert that we don't write any keys that don't belong to this shard.
    6617      9608465 :         // We don't assert this in release builds, since key ownership policies may change over
    6618      9608465 :         // time. Stray keys will be removed during compaction.
    6619      9608465 :         if cfg!(debug_assertions) {
    6620     19789832 :             for metadata in &batch.metadata {
    6621     10181367 :                 if let ValueMeta::Serialized(metadata) = metadata {
    6622     10181367 :                     let key = Key::from_compact(metadata.key);
    6623     10181367 :                     assert!(
    6624     10181367 :                         self.shard_identity.is_key_local(&key)
    6625            0 :                             || self.shard_identity.is_key_global(&key),
    6626            0 :                         "key {key} does not belong on shard {}",
    6627            0 :                         self.shard_identity.shard_index()
    6628              :                     );
    6629            0 :                 }
    6630              :             }
    6631            0 :         }
    6632              : 
    6633      9608465 :         let batch_max_lsn = batch.max_lsn;
    6634      9608465 :         let buf_size: u64 = batch.buffer_size() as u64;
    6635      9608465 : 
    6636      9608465 :         let action = self.get_open_layer_action(batch_max_lsn, buf_size);
    6637      9608465 :         let layer = self
    6638      9608465 :             .handle_open_layer_action(batch_max_lsn, action, ctx)
    6639      9608465 :             .await?;
    6640              : 
    6641      9608465 :         let res = layer.put_batch(batch, ctx).await;
    6642              : 
    6643      9608465 :         if res.is_ok() {
    6644      9608465 :             // Update the current size only when the entire write was ok.
    6645      9608465 :             // In case of failures, we may have had partial writes which
    6646      9608465 :             // render the size tracking out of sync. That's ok because
    6647      9608465 :             // the checkpoint distance should be significantly smaller
    6648      9608465 :             // than the S3 single shot upload limit of 5GiB.
    6649      9608465 :             let state = self.write_guard.as_mut().unwrap();
    6650      9608465 : 
    6651      9608465 :             state.current_size += buf_size;
    6652      9608465 :             state.prev_lsn = Some(batch_max_lsn);
    6653      9608465 :             state.max_lsn = std::cmp::max(state.max_lsn, Some(batch_max_lsn));
    6654      9608465 :         }
    6655              : 
    6656      9608465 :         res
    6657      9608465 :     }
    6658              : 
    6659              :     #[cfg(test)]
    6660              :     /// Test helper, for tests that would like to poke individual values without composing a batch
    6661      8780308 :     pub(crate) async fn put(
    6662      8780308 :         &mut self,
    6663      8780308 :         key: Key,
    6664      8780308 :         lsn: Lsn,
    6665      8780308 :         value: &Value,
    6666      8780308 :         ctx: &RequestContext,
    6667      8780308 :     ) -> anyhow::Result<()> {
    6668              :         use utils::bin_ser::BeSer;
    6669      8780308 :         if !key.is_valid_key_on_write_path() {
    6670            0 :             bail!(
    6671            0 :                 "the request contains data not supported by pageserver at TimelineWriter::put: {}",
    6672            0 :                 key
    6673            0 :             );
    6674      8780308 :         }
    6675      8780308 :         let val_ser_size = value.serialized_size().unwrap() as usize;
    6676      8780308 :         let batch = SerializedValueBatch::from_values(vec![(
    6677      8780308 :             key.to_compact(),
    6678      8780308 :             lsn,
    6679      8780308 :             val_ser_size,
    6680      8780308 :             value.clone(),
    6681      8780308 :         )]);
    6682      8780308 : 
    6683      8780308 :         self.put_batch(batch, ctx).await
    6684      8780308 :     }
    6685              : 
    6686            4 :     pub(crate) async fn delete_batch(
    6687            4 :         &mut self,
    6688            4 :         batch: &[(Range<Key>, Lsn)],
    6689            4 :         ctx: &RequestContext,
    6690            4 :     ) -> anyhow::Result<()> {
    6691            4 :         if let Some((_, lsn)) = batch.first() {
    6692            4 :             let action = self.get_open_layer_action(*lsn, 0);
    6693            4 :             let layer = self.handle_open_layer_action(*lsn, action, ctx).await?;
    6694            4 :             layer.put_tombstones(batch).await?;
    6695            0 :         }
    6696              : 
    6697            4 :         Ok(())
    6698            4 :     }
    6699              : 
    6700              :     /// Track the end of the latest digested WAL record.
    6701              :     /// Remember the (end of) last valid WAL record remembered in the timeline.
    6702              :     ///
    6703              :     /// Call this after you have finished writing all the WAL up to 'lsn'.
    6704              :     ///
    6705              :     /// 'lsn' must be aligned. This wakes up any wait_lsn() callers waiting for
    6706              :     /// the 'lsn' or anything older. The previous last record LSN is stored alongside
    6707              :     /// the latest and can be read.
    6708     10558189 :     pub(crate) fn finish_write(&self, new_lsn: Lsn) {
    6709     10558189 :         self.tl.finish_write(new_lsn);
    6710     10558189 :     }
    6711              : 
    6712       541140 :     pub(crate) fn update_current_logical_size(&self, delta: i64) {
    6713       541140 :         self.tl.update_current_logical_size(delta)
    6714       541140 :     }
    6715              : }
    6716              : 
    6717              : // We need TimelineWriter to be send in upcoming conversion of
    6718              : // Timeline::layers to tokio::sync::RwLock.
    6719              : #[test]
    6720            4 : fn is_send() {
    6721            4 :     fn _assert_send<T: Send>() {}
    6722            4 :     _assert_send::<TimelineWriter<'_>>();
    6723            4 : }
    6724              : 
    6725              : #[cfg(test)]
    6726              : mod tests {
    6727              :     use std::sync::Arc;
    6728              : 
    6729              :     use pageserver_api::key::Key;
    6730              :     use pageserver_api::value::Value;
    6731              :     use tracing::Instrument;
    6732              :     use utils::{id::TimelineId, lsn::Lsn};
    6733              : 
    6734              :     use crate::tenant::{
    6735              :         harness::{test_img, TenantHarness},
    6736              :         layer_map::LayerMap,
    6737              :         storage_layer::{Layer, LayerName, LayerVisibilityHint},
    6738              :         timeline::{DeltaLayerTestDesc, EvictionError},
    6739              :         PreviousHeatmap, Timeline,
    6740              :     };
    6741              : 
    6742              :     use super::HeatMapTimeline;
    6743              : 
    6744           22 :     fn assert_heatmaps_have_same_layers(lhs: &HeatMapTimeline, rhs: &HeatMapTimeline) {
    6745           22 :         assert_eq!(lhs.layers.len(), rhs.layers.len());
    6746           21 :         let lhs_rhs = lhs.layers.iter().zip(rhs.layers.iter());
    6747          105 :         for (l, r) in lhs_rhs {
    6748           84 :             assert_eq!(l.name, r.name);
    6749           84 :             assert_eq!(l.metadata, r.metadata);
    6750              :         }
    6751           21 :     }
    6752              : 
    6753              :     #[tokio::test]
    6754            5 :     async fn test_heatmap_generation() {
    6755            5 :         let harness = TenantHarness::create("heatmap_generation").await.unwrap();
    6756            5 : 
    6757            5 :         let covered_delta = DeltaLayerTestDesc::new_with_inferred_key_range(
    6758            5 :             Lsn(0x10)..Lsn(0x20),
    6759            5 :             vec![(
    6760            5 :                 Key::from_hex("620000000033333333444444445500000000").unwrap(),
    6761            5 :                 Lsn(0x11),
    6762            5 :                 Value::Image(test_img("foo")),
    6763            5 :             )],
    6764            5 :         );
    6765            5 :         let visible_delta = DeltaLayerTestDesc::new_with_inferred_key_range(
    6766            5 :             Lsn(0x10)..Lsn(0x20),
    6767            5 :             vec![(
    6768            5 :                 Key::from_hex("720000000033333333444444445500000000").unwrap(),
    6769            5 :                 Lsn(0x11),
    6770            5 :                 Value::Image(test_img("foo")),
    6771            5 :             )],
    6772            5 :         );
    6773            5 :         let l0_delta = DeltaLayerTestDesc::new(
    6774            5 :             Lsn(0x20)..Lsn(0x30),
    6775            5 :             Key::from_hex("000000000000000000000000000000000000").unwrap()
    6776            5 :                 ..Key::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(),
    6777            5 :             vec![(
    6778            5 :                 Key::from_hex("720000000033333333444444445500000000").unwrap(),
    6779            5 :                 Lsn(0x25),
    6780            5 :                 Value::Image(test_img("foo")),
    6781            5 :             )],
    6782            5 :         );
    6783            5 :         let delta_layers = vec![
    6784            5 :             covered_delta.clone(),
    6785            5 :             visible_delta.clone(),
    6786            5 :             l0_delta.clone(),
    6787            5 :         ];
    6788            5 : 
    6789            5 :         let image_layer = (
    6790            5 :             Lsn(0x40),
    6791            5 :             vec![(
    6792            5 :                 Key::from_hex("620000000033333333444444445500000000").unwrap(),
    6793            5 :                 test_img("bar"),
    6794            5 :             )],
    6795            5 :         );
    6796            5 :         let image_layers = vec![image_layer];
    6797            5 : 
    6798            5 :         let (tenant, ctx) = harness.load().await;
    6799            5 :         let timeline = tenant
    6800            5 :             .create_test_timeline_with_layers(
    6801            5 :                 TimelineId::generate(),
    6802            5 :                 Lsn(0x10),
    6803            5 :                 14,
    6804            5 :                 &ctx,
    6805            5 :                 delta_layers,
    6806            5 :                 image_layers,
    6807            5 :                 Lsn(0x100),
    6808            5 :             )
    6809            5 :             .await
    6810            5 :             .unwrap();
    6811            5 : 
    6812            5 :         // Layer visibility is an input to heatmap generation, so refresh it first
    6813            5 :         timeline.update_layer_visibility().await.unwrap();
    6814            5 : 
    6815            5 :         let heatmap = timeline
    6816            5 :             .generate_heatmap()
    6817            5 :             .await
    6818            5 :             .expect("Infallible while timeline is not shut down");
    6819            5 : 
    6820            5 :         assert_eq!(heatmap.timeline_id, timeline.timeline_id);
    6821            5 : 
    6822            5 :         // L0 should come last
    6823            5 :         assert_eq!(heatmap.layers.last().unwrap().name, l0_delta.layer_name());
    6824            5 : 
    6825            5 :         let mut last_lsn = Lsn::MAX;
    6826           25 :         for layer in &heatmap.layers {
    6827            5 :             // Covered layer should be omitted
    6828           20 :             assert!(layer.name != covered_delta.layer_name());
    6829            5 : 
    6830           20 :             let layer_lsn = match &layer.name {
    6831           10 :                 LayerName::Delta(d) => d.lsn_range.end,
    6832           10 :                 LayerName::Image(i) => i.lsn,
    6833            5 :             };
    6834            5 : 
    6835            5 :             // Apart from L0s, newest Layers should come first
    6836           20 :             if !LayerMap::is_l0(layer.name.key_range(), layer.name.is_delta()) {
    6837           15 :                 assert!(layer_lsn <= last_lsn);
    6838           15 :                 last_lsn = layer_lsn;
    6839            5 :             }
    6840            5 :         }
    6841            5 : 
    6842            5 :         // Evict all the layers and stash the old heatmap in the timeline.
    6843            5 :         // This simulates a migration to a cold secondary location.
    6844            5 : 
    6845            5 :         let guard = timeline.layers.read().await;
    6846            5 :         let mut all_layers = Vec::new();
    6847            5 :         let forever = std::time::Duration::from_secs(120);
    6848           25 :         for layer in guard.likely_resident_layers() {
    6849           25 :             all_layers.push(layer.clone());
    6850           25 :             layer.evict_and_wait(forever).await.unwrap();
    6851            5 :         }
    6852            5 :         drop(guard);
    6853            5 : 
    6854            5 :         timeline
    6855            5 :             .previous_heatmap
    6856            5 :             .store(Some(Arc::new(PreviousHeatmap::Active {
    6857            5 :                 heatmap: heatmap.clone(),
    6858            5 :                 read_at: std::time::Instant::now(),
    6859            5 :             })));
    6860            5 : 
    6861            5 :         // Generate a new heatmap and assert that it contains the same layers as the old one.
    6862            5 :         let post_migration_heatmap = timeline.generate_heatmap().await.unwrap();
    6863            5 :         assert_heatmaps_have_same_layers(&heatmap, &post_migration_heatmap);
    6864            5 : 
    6865            5 :         // Download each layer one by one. Generate the heatmap at each step and check
    6866            5 :         // that it's stable.
    6867           26 :         for layer in all_layers {
    6868           22 :             if layer.visibility() == LayerVisibilityHint::Covered {
    6869            5 :                 continue;
    6870           18 :             }
    6871           18 : 
    6872           18 :             eprintln!("Downloading {layer} and re-generating heatmap");
    6873            5 : 
    6874           18 :             let _resident = layer
    6875           18 :                 .download_and_keep_resident()
    6876           18 :                 .instrument(tracing::info_span!(
    6877           17 :                     parent: None,
    6878            5 :                     "download_layer",
    6879            5 :                     tenant_id = %timeline.tenant_shard_id.tenant_id,
    6880            0 :                     shard_id = %timeline.tenant_shard_id.shard_slug(),
    6881            0 :                     timeline_id = %timeline.timeline_id
    6882            5 :                 ))
    6883           17 :                 .await
    6884           17 :                 .unwrap();
    6885            5 : 
    6886           17 :             let post_download_heatmap = timeline.generate_heatmap().await.unwrap();
    6887           17 :             assert_heatmaps_have_same_layers(&heatmap, &post_download_heatmap);
    6888            5 :         }
    6889            5 : 
    6890            5 :         // Everything from the post-migration heatmap is now resident.
    6891            5 :         // Check that we drop it from memory.
    6892            5 :         assert!(matches!(
    6893            5 :             timeline.previous_heatmap.load().as_deref(),
    6894            5 :             Some(PreviousHeatmap::Obsolete)
    6895            5 :         ));
    6896            5 :     }
    6897              : 
    6898              :     #[tokio::test]
    6899            4 :     async fn test_previous_heatmap_obsoletion() {
    6900            4 :         let harness = TenantHarness::create("heatmap_previous_heatmap_obsoletion")
    6901            4 :             .await
    6902            4 :             .unwrap();
    6903            4 : 
    6904            4 :         let l0_delta = DeltaLayerTestDesc::new(
    6905            4 :             Lsn(0x20)..Lsn(0x30),
    6906            4 :             Key::from_hex("000000000000000000000000000000000000").unwrap()
    6907            4 :                 ..Key::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(),
    6908            4 :             vec![(
    6909            4 :                 Key::from_hex("720000000033333333444444445500000000").unwrap(),
    6910            4 :                 Lsn(0x25),
    6911            4 :                 Value::Image(test_img("foo")),
    6912            4 :             )],
    6913            4 :         );
    6914            4 : 
    6915            4 :         let image_layer = (
    6916            4 :             Lsn(0x40),
    6917            4 :             vec![(
    6918            4 :                 Key::from_hex("620000000033333333444444445500000000").unwrap(),
    6919            4 :                 test_img("bar"),
    6920            4 :             )],
    6921            4 :         );
    6922            4 : 
    6923            4 :         let delta_layers = vec![l0_delta];
    6924            4 :         let image_layers = vec![image_layer];
    6925            4 : 
    6926            4 :         let (tenant, ctx) = harness.load().await;
    6927            4 :         let timeline = tenant
    6928            4 :             .create_test_timeline_with_layers(
    6929            4 :                 TimelineId::generate(),
    6930            4 :                 Lsn(0x10),
    6931            4 :                 14,
    6932            4 :                 &ctx,
    6933            4 :                 delta_layers,
    6934            4 :                 image_layers,
    6935            4 :                 Lsn(0x100),
    6936            4 :             )
    6937            4 :             .await
    6938            4 :             .unwrap();
    6939            4 : 
    6940            4 :         // Layer visibility is an input to heatmap generation, so refresh it first
    6941            4 :         timeline.update_layer_visibility().await.unwrap();
    6942            4 : 
    6943            4 :         let heatmap = timeline
    6944            4 :             .generate_heatmap()
    6945            4 :             .await
    6946            4 :             .expect("Infallible while timeline is not shut down");
    6947            4 : 
    6948            4 :         // Both layers should be in the heatmap
    6949            4 :         assert!(!heatmap.layers.is_empty());
    6950            4 : 
    6951            4 :         // Now simulate a migration.
    6952            4 :         timeline
    6953            4 :             .previous_heatmap
    6954            4 :             .store(Some(Arc::new(PreviousHeatmap::Active {
    6955            4 :                 heatmap: heatmap.clone(),
    6956            4 :                 read_at: std::time::Instant::now(),
    6957            4 :             })));
    6958            4 : 
    6959            4 :         // Evict all the layers in the previous heatmap
    6960            4 :         let guard = timeline.layers.read().await;
    6961            4 :         let forever = std::time::Duration::from_secs(120);
    6962           12 :         for layer in guard.likely_resident_layers() {
    6963           12 :             layer.evict_and_wait(forever).await.unwrap();
    6964            4 :         }
    6965            4 :         drop(guard);
    6966            4 : 
    6967            4 :         // Generate a new heatmap and check that the previous heatmap
    6968            4 :         // has been marked obsolete.
    6969            4 :         let post_eviction_heatmap = timeline
    6970            4 :             .generate_heatmap()
    6971            4 :             .await
    6972            4 :             .expect("Infallible while timeline is not shut down");
    6973            4 : 
    6974            4 :         assert!(post_eviction_heatmap.layers.is_empty());
    6975            4 :         assert!(matches!(
    6976            4 :             timeline.previous_heatmap.load().as_deref(),
    6977            4 :             Some(PreviousHeatmap::Obsolete)
    6978            4 :         ));
    6979            4 :     }
    6980              : 
    6981              :     #[tokio::test]
    6982            4 :     async fn two_layer_eviction_attempts_at_the_same_time() {
    6983            4 :         let harness = TenantHarness::create("two_layer_eviction_attempts_at_the_same_time")
    6984            4 :             .await
    6985            4 :             .unwrap();
    6986            4 : 
    6987            4 :         let (tenant, ctx) = harness.load().await;
    6988            4 :         let timeline = tenant
    6989            4 :             .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx)
    6990            4 :             .await
    6991            4 :             .unwrap();
    6992            4 : 
    6993            4 :         let layer = find_some_layer(&timeline).await;
    6994            4 :         let layer = layer
    6995            4 :             .keep_resident()
    6996            4 :             .await
    6997            4 :             .expect("no download => no downloading errors")
    6998            4 :             .drop_eviction_guard();
    6999            4 : 
    7000            4 :         let forever = std::time::Duration::from_secs(120);
    7001            4 : 
    7002            4 :         let first = layer.evict_and_wait(forever);
    7003            4 :         let second = layer.evict_and_wait(forever);
    7004            4 : 
    7005            4 :         let (first, second) = tokio::join!(first, second);
    7006            4 : 
    7007            4 :         let res = layer.keep_resident().await;
    7008            4 :         assert!(res.is_none(), "{res:?}");
    7009            4 : 
    7010            4 :         match (first, second) {
    7011            4 :             (Ok(()), Ok(())) => {
    7012            4 :                 // because there are no more timeline locks being taken on eviction path, we can
    7013            4 :                 // witness all three outcomes here.
    7014            4 :             }
    7015            4 :             (Ok(()), Err(EvictionError::NotFound)) | (Err(EvictionError::NotFound), Ok(())) => {
    7016            0 :                 // if one completes before the other, this is fine just as well.
    7017            0 :             }
    7018            4 :             other => unreachable!("unexpected {:?}", other),
    7019            4 :         }
    7020            4 :     }
    7021              : 
    7022            4 :     async fn find_some_layer(timeline: &Timeline) -> Layer {
    7023            4 :         let layers = timeline.layers.read().await;
    7024            4 :         let desc = layers
    7025            4 :             .layer_map()
    7026            4 :             .unwrap()
    7027            4 :             .iter_historic_layers()
    7028            4 :             .next()
    7029            4 :             .expect("must find one layer to evict");
    7030            4 : 
    7031            4 :         layers.get_from_desc(&desc)
    7032            4 :     }
    7033              : }
        

Generated by: LCOV version 2.1-beta